4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * DSP/BIOS Bridge Node Manager.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
19 #include <linux/types.h>
20 /* ----------------------------------- Host OS */
21 #include <dspbridge/host_os.h>
23 /* ----------------------------------- DSP/BIOS Bridge */
24 #include <dspbridge/dbdefs.h>
26 /* ----------------------------------- Trace & Debug */
27 #include <dspbridge/dbc.h>
29 /* ----------------------------------- OS Adaptation Layer */
30 #include <dspbridge/list.h>
31 #include <dspbridge/memdefs.h>
32 #include <dspbridge/proc.h>
33 #include <dspbridge/strm.h>
34 #include <dspbridge/sync.h>
35 #include <dspbridge/ntfy.h>
37 /* ----------------------------------- Platform Manager */
38 #include <dspbridge/cmm.h>
39 #include <dspbridge/cod.h>
40 #include <dspbridge/dev.h>
41 #include <dspbridge/msg.h>
43 /* ----------------------------------- Resource Manager */
44 #include <dspbridge/dbdcd.h>
45 #include <dspbridge/disp.h>
46 #include <dspbridge/rms_sh.h>
48 /* ----------------------------------- Link Driver */
49 #include <dspbridge/dspdefs.h>
50 #include <dspbridge/dspioctl.h>
52 /* ----------------------------------- Others */
53 #include <dspbridge/gb.h>
54 #include <dspbridge/uuidutil.h>
56 /* ----------------------------------- This */
57 #include <dspbridge/nodepriv.h>
58 #include <dspbridge/node.h>
59 #include <dspbridge/dmm.h>
61 /* Static/Dynamic Loader includes */
62 #include <dspbridge/dbll.h>
63 #include <dspbridge/nldr.h>
65 #include <dspbridge/drv.h>
66 #include <dspbridge/drvdefs.h>
67 #include <dspbridge/resourcecleanup.h>
70 #include <dspbridge/dspdeh.h>
72 #define HOSTPREFIX "/host"
73 #define PIPEPREFIX "/dbpipe"
75 #define MAX_INPUTS(h) \
76 ((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams)
77 #define MAX_OUTPUTS(h) \
78 ((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams)
80 #define NODE_GET_PRIORITY(h) ((h)->prio)
81 #define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio)
82 #define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state)
84 #define MAXPIPES 100 /* Max # of /pipe connections (CSL limit) */
85 #define MAXDEVSUFFIXLEN 2 /* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */
87 #define PIPENAMELEN (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN)
88 #define HOSTNAMELEN (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN)
90 #define MAXDEVNAMELEN 32 /* dsp_ndbprops.ac_name size */
92 #define EXECUTEPHASE 2
95 /* Define default STRM parameters */
97 * TBD: Put in header file, make global DSP_STRMATTRS with defaults,
98 * or make defaults configurable.
100 #define DEFAULTBUFSIZE 32
101 #define DEFAULTNBUFS 2
102 #define DEFAULTSEGID 0
103 #define DEFAULTALIGNMENT 0
104 #define DEFAULTTIMEOUT 10000
106 #define RMSQUERYSERVER 0
107 #define RMSCONFIGURESERVER 1
108 #define RMSCREATENODE 2
109 #define RMSEXECUTENODE 3
110 #define RMSDELETENODE 4
111 #define RMSCHANGENODEPRIORITY 5
112 #define RMSREADMEMORY 6
113 #define RMSWRITEMEMORY 7
115 #define MAXTIMEOUT 2000
119 #define PWR_TIMEOUT 500 /* default PWR timeout in msec */
121 #define STACKSEGLABEL "L1DSRAM_HEAP" /* Label for DSP Stack Segment Addr */
/*
 * NOTE(review): sampled excerpt — the "struct node_mgr {" opener, its
 * closing brace, and some member lines are missing from this dump.
 * One node_mgr exists per device object; it owns the list of all
 * allocated nodes plus the pipe/channel allocation bitmaps that those
 * nodes share, serialized by node_mgr_lock.
 */
124 * ======== node_mgr ========
127 struct dev_object *hdev_obj; /* Device object */
128 /* Function interface to Bridge driver */
129 struct bridge_drv_interface *intf_fxns;
130 struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */
131 struct disp_object *disp_obj; /* Node dispatcher */
132 struct lst_list *node_list; /* List of all allocated nodes */
133 u32 num_nodes; /* Number of nodes in node_list */
134 u32 num_created; /* Number of nodes *created* on DSP */
135 struct gb_t_map *pipe_map; /* Pipe connection bit map */
136 struct gb_t_map *pipe_done_map; /* Pipes that are half free */
137 struct gb_t_map *chnl_map; /* Channel allocation bit map */
138 struct gb_t_map *dma_chnl_map; /* DMA Channel allocation bit map */
139 struct gb_t_map *zc_chnl_map; /* Zero-Copy Channel alloc bit map */
140 struct ntfy_object *ntfy_obj; /* Manages registered notifications */
141 struct mutex node_mgr_lock; /* For critical sections */
142 u32 ul_fxn_addrs[NUMRMSFXNS]; /* RMS function addresses */
143 struct msg_mgr *msg_mgr_obj;
145 /* Processor properties needed by Node Dispatcher */
146 u32 ul_num_chnls; /* Total number of channels */
147 u32 ul_chnl_offset; /* Offset of chnl ids rsvd for RMS */
148 u32 ul_chnl_buf_size; /* Buffer size for data to RMS */
149 int proc_family; /* eg, 5000 */
150 int proc_type; /* eg, 5510 */
151 u32 udsp_word_size; /* Size of DSP word on host bytes */
152 u32 udsp_data_mau_size; /* Size of DSP data MAU */
153 u32 udsp_mau_size; /* Size of MAU */
154 s32 min_pri; /* Minimum runtime priority for node */
155 s32 max_pri; /* Maximum runtime priority for node */
157 struct strm_mgr *strm_mgr_obj; /* STRM manager */
159 /* Loader properties */
160 struct nldr_object *nldr_obj; /* Handle to loader */
161 struct node_ldr_fxns nldr_fxns; /* Handle to loader functions */
162 bool loader_init; /* Loader Init function succeeded? */
166 * ======== connecttype ========
/*
 * NOTE(review): sampled excerpt — the "struct stream_chnl {" opener and
 * closing brace are missing here. Describes one end of a stream
 * connection: what it is connected to (type) and which pipe/channel id
 * was reserved for it in the node_mgr bitmaps.
 */
176 * ======== stream_chnl ========
179 enum connecttype type; /* Type of stream connection */
180 u32 dev_id; /* pipe or channel id */
/*
 * NOTE(review): sampled excerpt — the "struct node_object {" opener and
 * closing brace are missing. Per-node state: identity, runtime state
 * machine, stream bookkeeping, create-args sent to the DSP-side RMS,
 * messaging, and dynamic-loader handles. list_elem must stay first so
 * the object can be cast to struct list_head (see lst_init_elem use in
 * node_allocate).
 */
184 * ======== node_object ========
187 struct list_head list_elem;
188 struct node_mgr *hnode_mgr; /* The manager of this node */
189 struct proc_object *hprocessor; /* Back pointer to processor */
190 struct dsp_uuid node_uuid; /* Node's ID */
191 s32 prio; /* Node's current priority */
192 u32 utimeout; /* Timeout for blocking NODE calls */
193 u32 heap_size; /* Heap Size */
194 u32 udsp_heap_virt_addr; /* DSP-side heap virtual address (was mislabeled "Heap Size") */
195 u32 ugpp_heap_virt_addr; /* GPP-side heap virtual address (was mislabeled "Heap Size") */
196 enum node_type ntype; /* Type of node: message, task, etc */
197 enum node_state node_state; /* NODE_ALLOCATED, NODE_CREATED, ... */
198 u32 num_inputs; /* Current number of inputs */
199 u32 num_outputs; /* Current number of outputs */
200 u32 max_input_index; /* Current max input stream index */
201 u32 max_output_index; /* Current max output stream index */
202 struct stream_chnl *inputs; /* Node's input streams */
203 struct stream_chnl *outputs; /* Node's output streams */
204 struct node_createargs create_args; /* Args for node create func */
205 nodeenv node_env; /* Environment returned by RMS */
206 struct dcd_genericobj dcd_props; /* Node properties from DCD */
207 struct dsp_cbdata *pargs; /* Optional args to pass to node */
208 struct ntfy_object *ntfy_obj; /* Manages registered notifications */
209 char *pstr_dev_name; /* device name, if device node */
210 struct sync_object *sync_done; /* Synchronize node_terminate */
211 s32 exit_status; /* execute function return status */
213 /* Information needed for node_get_attr() */
214 void *device_owner; /* If dev node, task that owns it */
215 u32 num_gpp_inputs; /* Current # of from GPP streams */
216 u32 num_gpp_outputs; /* Current # of to GPP streams */
217 /* Current stream connections */
218 struct dsp_streamconnect *stream_connect;
221 struct msg_queue *msg_queue_obj;
223 /* These fields used for SM messaging */
224 struct cmm_xlatorobject *xlator; /* Node's SM addr translator */
226 /* Handle to pass to dynamic loader */
227 struct nldr_nodeobject *nldr_node_obj;
228 bool loaded; /* Code is (dynamically) loaded */
229 bool phase_split; /* Phases split in many libs or ovly */
/*
 * File-scope data and forward declarations for the static helpers used
 * by the public NODE_* entry points below.
 * NOTE(review): sampled excerpt — several initializer lines of
 * node_dfltbufattrs and nldr_fxns, and parts of the prototypes, are
 * missing from this dump.
 */
233 /* Default buffer attributes */
234 static struct dsp_bufferattr node_dfltbufattrs = {
237 0, /* buf_alignment */
240 static void delete_node(struct node_object *hnode,
241 struct process_context *pr_ctxt);
242 static void delete_node_mgr(struct node_mgr *hnode_mgr);
243 static void fill_stream_connect(struct node_object *node1,
244 struct node_object *node2, u32 stream1,
246 static void fill_stream_def(struct node_object *hnode,
247 struct node_strmdef *pstrm_def,
248 struct dsp_strmattr *pattrs);
249 static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
250 static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
252 static int get_node_props(struct dcd_manager *hdcd_mgr,
253 struct node_object *hnode,
254 const struct dsp_uuid *node_uuid,
255 struct dcd_genericobj *dcd_prop);
256 static int get_proc_props(struct node_mgr *hnode_mgr,
257 struct dev_object *hdev_obj);
258 static int get_rms_fxns(struct node_mgr *hnode_mgr);
259 static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
260 u32 ul_num_bytes, u32 mem_space);
261 static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
262 u32 ul_num_bytes, u32 mem_space);
/* Module reference count: bumped by node_init/node_exit (not visible
 * in this excerpt); DBC_REQUIRE(refs > 0) below guards against use
 * before init. */
264 static u32 refs; /* module reference count */
266 /* Dynamic loader functions. */
267 static struct node_ldr_fxns nldr_fxns = {
/*
 * ======== node_get_state ========
 * Return the node's current lifecycle state (NODE_ALLOCATED,
 * NODE_CREATED, NODE_RUNNING, ...). Read without taking
 * node_mgr_lock, so the value may be stale by the time the caller
 * acts on it.
 * NOTE(review): braces and the usual NULL-handle validation are
 * missing from this sampled excerpt — presumably hnode is checked
 * before the dereference; confirm against the full source.
 */
278 enum node_state node_get_state(void *hnode)
280 struct node_object *pnode = (struct node_object *)hnode;
284 return pnode->node_state;
288 * ======== node_allocate ========
290 * Allocate GPP resources to manage a node on the DSP.
/*
 * ======== node_allocate ========
 * Allocate all GPP-side resources for a node: the node_object itself,
 * its optional user-supplied heap (reserved + mapped into DSP space),
 * stream bookkeeping arrays, message queue, SM translator, and dynamic
 * loader handle; then link it into the manager's node_list in the
 * NODE_ALLOCATED state and register it with the resource tracker.
 * NOTE(review): this is a sampled excerpt — many lines (error-path
 * gotos, closing braces, several assignments) are missing; do not
 * infer control flow solely from what is visible here.
 */
292 int node_allocate(struct proc_object *hprocessor,
293 const struct dsp_uuid *node_uuid,
294 const struct dsp_cbdata *pargs,
295 const struct dsp_nodeattrin *attr_in,
296 struct node_res_object **noderes,
297 struct process_context *pr_ctxt)
299 struct node_mgr *hnode_mgr;
300 struct dev_object *hdev_obj;
301 struct node_object *pnode = NULL;
302 enum node_type node_type = NODE_TASK;
303 struct node_msgargs *pmsg_args;
304 struct node_taskargs *ptask_args;
306 struct bridge_drv_interface *intf_fxns;
308 struct cmm_object *hcmm_mgr = NULL; /* Shared memory manager hndl */
313 u32 ul_stack_seg_addr, ul_stack_seg_val;
315 struct cfg_hostres *host_res;
316 struct bridge_dev_context *pbridge_context;
319 struct dsp_processorstate proc_state;
321 struct dmm_object *dmm_mgr;
322 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
/* Design-by-contract preconditions (compiled out in release builds). */
327 DBC_REQUIRE(refs > 0);
328 DBC_REQUIRE(hprocessor != NULL);
329 DBC_REQUIRE(noderes != NULL);
330 DBC_REQUIRE(node_uuid != NULL);
334 status = proc_get_processor_id(hprocessor, &proc_id);
336 if (proc_id != DSP_UNIT)
339 status = proc_get_dev_object(hprocessor, &hdev_obj);
341 status = dev_get_node_manager(hdev_obj, &hnode_mgr);
342 if (hnode_mgr == NULL)
350 status = dev_get_bridge_context(hdev_obj, &pbridge_context);
351 if (!pbridge_context) {
356 status = proc_get_state(hprocessor, &proc_state,
357 sizeof(struct dsp_processorstate));
360 /* If processor is in error state then don't attempt
361 to send the message */
362 if (proc_state.proc_state == PROC_ERROR) {
367 /* Assuming that 0 is not a valid function address */
368 if (hnode_mgr->ul_fxn_addrs[0] == 0) {
369 /* No RMS on target - we currently can't handle this */
370 pr_err("%s: Failed, no RMS in base image\n", __func__);
373 /* Validate attr_in fields, if non-NULL */
375 /* Check if attr_in->prio is within range */
376 if (attr_in->prio < hnode_mgr->min_pri ||
377 attr_in->prio > hnode_mgr->max_pri)
381 /* Allocate node object and fill in */
385 pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
390 pnode->hnode_mgr = hnode_mgr;
391 /* This critical section protects get_node_props */
392 mutex_lock(&hnode_mgr->node_mgr_lock);
394 /* Get dsp_ndbprops from node database */
395 status = get_node_props(hnode_mgr->hdcd_mgr, pnode, node_uuid,
396 &(pnode->dcd_props));
/* Seed node identity/attributes from the DCD database entry; attr_in
 * overrides (utimeout, prio) are applied after the lock is dropped. */
400 pnode->node_uuid = *node_uuid;
401 pnode->hprocessor = hprocessor;
402 pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
403 pnode->utimeout = pnode->dcd_props.obj_data.node_obj.ndb_props.utimeout;
404 pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;
406 /* Currently only C64 DSP builds support Node Dynamic * heaps */
407 /* Allocate memory for node heap */
408 pnode->create_args.asa.task_arg_obj.heap_size = 0;
409 pnode->create_args.asa.task_arg_obj.udsp_heap_addr = 0;
410 pnode->create_args.asa.task_arg_obj.udsp_heap_res_addr = 0;
411 pnode->create_args.asa.task_arg_obj.ugpp_heap_addr = 0;
415 /* Check if we have a user allocated node heap */
416 if (!(attr_in->pgpp_virt_addr))
419 /* check for page aligned Heap size */
420 if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
421 pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
422 __func__, attr_in->heap_size);
425 pnode->create_args.asa.task_arg_obj.heap_size =
427 pnode->create_args.asa.task_arg_obj.ugpp_heap_addr =
428 (u32) attr_in->pgpp_virt_addr;
/* Reserve a DSP virtual range for the heap (+ one guard page), then
 * map the user's GPP buffer into it. */
433 status = proc_reserve_memory(hprocessor,
434 pnode->create_args.asa.task_arg_obj.
435 heap_size + PAGE_SIZE,
436 (void **)&(pnode->create_args.asa.
437 task_arg_obj.udsp_heap_res_addr),
440 pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
445 status = dmm_get_handle(p_proc_object, &dmm_mgr);
447 status = DSP_EHANDLE;
451 dmm_mem_map_dump(dmm_mgr);
454 map_attrs |= DSP_MAPLITTLEENDIAN;
455 map_attrs |= DSP_MAPELEMSIZE32;
456 map_attrs |= DSP_MAPVIRTUALADDR;
457 status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
458 pnode->create_args.asa.task_arg_obj.heap_size,
459 (void *)pnode->create_args.asa.task_arg_obj.
460 udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
463 pr_err("%s: Failed to map memory for Heap: 0x%x\n",
466 pnode->create_args.asa.task_arg_obj.udsp_heap_addr =
470 mutex_unlock(&hnode_mgr->node_mgr_lock);
471 if (attr_in != NULL) {
472 /* Overrides of NBD properties */
473 pnode->utimeout = attr_in->utimeout;
474 pnode->prio = attr_in->prio;
476 /* Create object to manage notifications */
478 pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
481 ntfy_init(pnode->ntfy_obj);
487 node_type = node_get_type(pnode);
488 /* Allocate dsp_streamconnect array for device, task, and
489 * dais socket nodes. */
490 if (node_type != NODE_MESSAGE) {
491 num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
492 pnode->stream_connect = kzalloc(num_streams *
493 sizeof(struct dsp_streamconnect),
495 if (num_streams > 0 && pnode->stream_connect == NULL)
499 if (!status && (node_type == NODE_TASK ||
500 node_type == NODE_DAISSOCKET)) {
501 /* Allocate arrays for maintainig stream connections */
502 pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
503 sizeof(struct stream_chnl), GFP_KERNEL);
504 pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
505 sizeof(struct stream_chnl), GFP_KERNEL);
506 ptask_args = &(pnode->create_args.asa.task_arg_obj);
507 ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
508 sizeof(struct node_strmdef),
510 ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
511 sizeof(struct node_strmdef),
/* Fail only when a needed array is missing; zero-stream nodes are
 * allowed to get NULL back from kzalloc(0, ...). */
513 if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
514 ptask_args->strm_in_def
516 || (MAX_OUTPUTS(pnode) > 0
517 && (pnode->outputs == NULL
518 || ptask_args->strm_out_def == NULL)))
522 if (!status && (node_type != NODE_DEVICE)) {
523 /* Create an event that will be posted when RMS_EXIT is
525 pnode->sync_done = kzalloc(sizeof(struct sync_object),
527 if (pnode->sync_done)
528 sync_init_event(pnode->sync_done);
533 /*Get the shared mem mgr for this nodes dev object */
534 status = cmm_get_handle(hprocessor, &hcmm_mgr);
536 /* Allocate a SM addr translator for this node
538 status = cmm_xlator_create(&pnode->xlator,
543 /* Fill in message args */
544 if ((pargs != NULL) && (pargs->cb_data > 0)) {
546 &(pnode->create_args.asa.node_msg_args);
547 pmsg_args->pdata = kzalloc(pargs->cb_data,
549 if (pmsg_args->pdata == NULL) {
552 pmsg_args->arg_length = pargs->cb_data;
553 memcpy(pmsg_args->pdata,
561 if (!status && node_type != NODE_DEVICE) {
562 /* Create a message queue for this node */
563 intf_fxns = hnode_mgr->intf_fxns;
565 (*intf_fxns->pfn_msg_create_queue) (hnode_mgr->msg_mgr_obj,
566 &pnode->msg_queue_obj,
568 pnode->create_args.asa.
569 node_msg_args.max_msgs,
574 /* Create object for dynamic loading */
576 status = hnode_mgr->nldr_fxns.pfn_allocate(hnode_mgr->nldr_obj,
582 &pnode->phase_split);
585 /* Compare value read from Node Properties and check if it is same as
586 * STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate
587 * GPP Address, Read the value in that address and override the
588 * stack_seg value in task args */
590 (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
591 stack_seg_name != NULL) {
593 pnode->dcd_props.obj_data.node_obj.ndb_props.
594 stack_seg_name, STACKSEGLABEL) == 0) {
596 hnode_mgr->nldr_fxns.
597 pfn_get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
600 pr_err("%s: Failed to get addr for DYNEXT_BEG"
601 " status = 0x%x\n", __func__, status);
604 hnode_mgr->nldr_fxns.
605 pfn_get_fxn_addr(pnode->nldr_node_obj,
606 "L1DSRAM_HEAP", &pul_value);
609 pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
610 " status = 0x%x\n", __func__, status);
612 host_res = pbridge_context->resources;
617 pr_err("%s: Failed to get host resource, status"
618 " = 0x%x\n", __func__, status);
/* Translate the DSP-side symbol offset into a GPP address and read
 * the stack segment value through it. */
622 ul_gpp_mem_base = (u32) host_res->dw_mem_base[1];
623 off_set = pul_value - dynext_base;
624 ul_stack_seg_addr = ul_gpp_mem_base + off_set;
625 ul_stack_seg_val = readl(ul_stack_seg_addr);
627 dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
628 " 0x%x\n", __func__, ul_stack_seg_val,
631 pnode->create_args.asa.task_arg_obj.stack_seg =
638 /* Add the node to the node manager's list of allocated
640 lst_init_elem((struct list_head *)pnode);
641 NODE_SET_STATE(pnode, NODE_ALLOCATED);
643 mutex_lock(&hnode_mgr->node_mgr_lock);
645 lst_put_tail(hnode_mgr->node_list, (struct list_head *) pnode);
646 ++(hnode_mgr->num_nodes);
648 /* Exit critical section */
649 mutex_unlock(&hnode_mgr->node_mgr_lock);
651 /* Preset this to assume phases are split
652 * (for overlay and dll) */
653 pnode->phase_split = true;
655 /* Notify all clients registered for DSP_NODESTATECHANGE. */
656 proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
/* Error path: delete_node() tears down everything allocated above. */
660 delete_node(pnode, pr_ctxt);
665 status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
667 delete_node(pnode, pr_ctxt);
671 *noderes = (struct node_res_object *)node_res;
672 drv_proc_node_update_heap_status(node_res, true);
673 drv_proc_node_update_status(node_res, true);
675 DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
677 dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
678 "node_res: %p status: 0x%x\n", __func__, hprocessor,
679 node_uuid, pargs, attr_in, noderes, status);
684 * ======== node_alloc_msg_buf ========
686 * Allocates buffer for zero copy messaging.
/*
 * ======== node_alloc_msg_buf ========
 * Allocate (or translate) a shared-memory buffer for zero-copy
 * messaging with this node, via the node's CMM address translator.
 * Rejects device nodes and non-DSP processors; only SM segment 1 and a
 * fixed set of alignments are supported for host-side allocations.
 * NOTE(review): sampled excerpt — the pbuffer parameter, several
 * branches and closing braces are missing from this dump.
 */
688 DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
689 struct dsp_bufferattr *pattr,
692 struct node_object *pnode = (struct node_object *)hnode;
694 bool va_flag = false;
698 DBC_REQUIRE(refs > 0);
699 DBC_REQUIRE(pbuffer != NULL);
701 DBC_REQUIRE(usize > 0);
705 else if (node_get_type(pnode) == NODE_DEVICE)
/* NULL pattr means "use module defaults" (node_dfltbufattrs). */
712 pattr = &node_dfltbufattrs; /* set defaults */
714 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
715 if (proc_id != DSP_UNIT) {
719 /* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
720 * virt address, so set this info in this node's translator
721 * object for future ref. If MEM_GETVIRTUALSEGID then retrieve
722 * virtual address from node's translator. */
723 if ((pattr->segment_id & MEM_SETVIRTUALSEGID) ||
724 (pattr->segment_id & MEM_GETVIRTUALSEGID)) {
726 set_info = (pattr->segment_id & MEM_SETVIRTUALSEGID) ?
728 /* Clear mask bits */
729 pattr->segment_id &= ~MEM_MASKVIRTUALSEGID;
730 /* Set/get this node's translators virtual address base/size */
731 status = cmm_xlator_info(pnode->xlator, pbuffer, usize,
732 pattr->segment_id, set_info);
734 if (!status && (!va_flag)) {
735 if (pattr->segment_id != 1) {
736 /* Node supports single SM segment only. */
739 /* Arbitrary SM buffer alignment not supported for host side
740 * allocs, but guaranteed for the following alignment
742 switch (pattr->buf_alignment) {
749 /* alignment value not supported */
754 /* allocate physical buffer from seg_id in node's
756 (void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer,
758 if (*pbuffer == NULL) {
759 pr_err("%s: error - Out of shared memory\n",
770 * ======== node_change_priority ========
772 * Change the priority of a node in the allocated state, or that is
773 * currently running or paused on the target.
/*
 * ======== node_change_priority ========
 * Change a task/DAIS-socket node's priority. For ALLOCATED/PAUSED
 * nodes only the GPP-side copy is updated; for RUNNING nodes the new
 * priority is also dispatched to the DSP-side RMS
 * (RMSCHANGENODEPRIORITY) before the local copy is updated. Entire
 * operation is serialized by node_mgr_lock.
 * NOTE(review): sampled excerpt — error-path statements and closing
 * braces are missing from this dump.
 */
775 int node_change_priority(struct node_object *hnode, s32 prio)
777 struct node_object *pnode = (struct node_object *)hnode;
778 struct node_mgr *hnode_mgr = NULL;
779 enum node_type node_type;
780 enum node_state state;
784 DBC_REQUIRE(refs > 0);
786 if (!hnode || !hnode->hnode_mgr) {
789 hnode_mgr = hnode->hnode_mgr;
790 node_type = node_get_type(hnode);
/* Only task and DAIS-socket nodes have a runtime priority, and prio
 * must fall in the manager's [min_pri, max_pri] range. */
791 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
793 else if (prio < hnode_mgr->min_pri || prio > hnode_mgr->max_pri)
799 /* Enter critical section */
800 mutex_lock(&hnode_mgr->node_mgr_lock);
802 state = node_get_state(hnode);
803 if (state == NODE_ALLOCATED || state == NODE_PAUSED) {
804 NODE_SET_PRIORITY(hnode, prio);
806 if (state != NODE_RUNNING) {
810 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
811 if (proc_id == DSP_UNIT) {
813 disp_node_change_priority(hnode_mgr->disp_obj,
815 hnode_mgr->ul_fxn_addrs
816 [RMSCHANGENODEPRIORITY],
817 hnode->node_env, prio);
820 NODE_SET_PRIORITY(hnode, prio);
824 /* Leave critical section */
825 mutex_unlock(&hnode_mgr->node_mgr_lock);
831 * ======== node_connect ========
833 * Connect two nodes on the DSP, or a node on the DSP to the GPP.
/*
 * ======== node_connect ========
 * Connect node1's output stream1 to node2's input stream2. Three
 * shapes are handled under node_mgr_lock:
 *   task <-> task : reserve a pipe id and name both ends "/dbpipe<n>";
 *   task <-> GPP  : reserve a channel id (PROCCOPY/RDMA/ZEROCOPY maps)
 *                   and name the stream end "/host<n>";
 *   task <-> dev  : copy the device node's name (plus optional
 *                   conn_param bytes) into the stream definition.
 * Either side may be DSP_HGPPNODE to denote the GPP. Both nodes must
 * be in NODE_ALLOCATED state; message nodes cannot be connected.
 * NOTE(review): sampled excerpt — many lines (status assignments,
 * closing braces, parts of conditions) are missing from this dump.
 */
835 int node_connect(struct node_object *node1, u32 stream1,
836 struct node_object *node2,
837 u32 stream2, struct dsp_strmattr *pattrs,
838 struct dsp_cbdata *conn_param)
840 struct node_mgr *hnode_mgr;
841 char *pstr_dev_name = NULL;
842 enum node_type node1_type = NODE_TASK;
843 enum node_type node2_type = NODE_TASK;
844 struct node_strmdef *pstrm_def;
845 struct node_strmdef *input = NULL;
846 struct node_strmdef *output = NULL;
847 struct node_object *dev_node_obj;
848 struct node_object *hnode;
849 struct stream_chnl *pstream;
850 u32 pipe_id = GB_NOBITS;
851 u32 chnl_id = GB_NOBITS;
855 DBC_REQUIRE(refs > 0);
/* Handles must be real nodes or the special DSP_HGPPNODE sentinel. */
857 if ((node1 != (struct node_object *)DSP_HGPPNODE && !node1) ||
858 (node2 != (struct node_object *)DSP_HGPPNODE && !node2))
862 /* The two nodes must be on the same processor */
863 if (node1 != (struct node_object *)DSP_HGPPNODE &&
864 node2 != (struct node_object *)DSP_HGPPNODE &&
865 node1->hnode_mgr != node2->hnode_mgr)
867 /* Cannot connect a node to itself */
873 /* node_get_type() will return NODE_GPP if hnode =
875 node1_type = node_get_type(node1);
876 node2_type = node_get_type(node2);
877 /* Check stream indices ranges */
878 if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE &&
879 stream1 >= MAX_OUTPUTS(node1)) || (node2_type != NODE_GPP
888 * Only the following types of connections are allowed:
889 * task/dais socket < == > task/dais socket
890 * task/dais socket < == > device
891 * task/dais socket < == > GPP
893 * ie, no message nodes, and at least one task or dais
896 if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
897 (node1_type != NODE_TASK && node1_type != NODE_DAISSOCKET &&
898 node2_type != NODE_TASK && node2_type != NODE_DAISSOCKET))
902 * Check stream mode. Default is STRMMODE_PROCCOPY.
904 if (!status && pattrs) {
905 if (pattrs->strm_mode != STRMMODE_PROCCOPY)
906 status = -EPERM; /* illegal stream mode */
/* Pick the manager from whichever side is a real (non-GPP) node. */
912 if (node1_type != NODE_GPP) {
913 hnode_mgr = node1->hnode_mgr;
915 DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
916 hnode_mgr = node2->hnode_mgr;
918 /* Enter critical section */
919 mutex_lock(&hnode_mgr->node_mgr_lock);
921 /* Nodes must be in the allocated state */
922 if (node1_type != NODE_GPP && node_get_state(node1) != NODE_ALLOCATED)
925 if (node2_type != NODE_GPP && node_get_state(node2) != NODE_ALLOCATED)
929 /* Check that stream indices for task and dais socket nodes
930 * are not already be used. (Device nodes checked later) */
931 if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
933 &(node1->create_args.asa.
934 task_arg_obj.strm_out_def[stream1]);
935 if (output->sz_device != NULL)
939 if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
941 &(node2->create_args.asa.
942 task_arg_obj.strm_in_def[stream2]);
943 if (input->sz_device != NULL)
948 /* Connecting two task nodes? */
949 if (!status && ((node1_type == NODE_TASK ||
950 node1_type == NODE_DAISSOCKET)
951 && (node2_type == NODE_TASK
952 || node2_type == NODE_DAISSOCKET))) {
953 /* Find available pipe */
954 pipe_id = gb_findandset(hnode_mgr->pipe_map);
955 if (pipe_id == GB_NOBITS) {
956 status = -ECONNREFUSED;
958 node1->outputs[stream1].type = NODECONNECT;
959 node2->inputs[stream2].type = NODECONNECT;
960 node1->outputs[stream1].dev_id = pipe_id;
961 node2->inputs[stream2].dev_id = pipe_id;
962 output->sz_device = kzalloc(PIPENAMELEN + 1,
964 input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
965 if (output->sz_device == NULL ||
966 input->sz_device == NULL) {
967 /* Undo the connection */
968 kfree(output->sz_device);
970 kfree(input->sz_device);
972 output->sz_device = NULL;
973 input->sz_device = NULL;
974 gb_clear(hnode_mgr->pipe_map, pipe_id);
977 /* Copy "/dbpipe<pipId>" name to device names */
978 sprintf(output->sz_device, "%s%d",
979 PIPEPREFIX, pipe_id);
980 strcpy(input->sz_device, output->sz_device);
984 /* Connecting task node to host? */
985 if (!status && (node1_type == NODE_GPP ||
986 node2_type == NODE_GPP)) {
987 if (node1_type == NODE_GPP) {
988 chnl_mode = CHNL_MODETODSP;
990 DBC_ASSERT(node2_type == NODE_GPP);
991 chnl_mode = CHNL_MODEFROMDSP;
993 /* Reserve a channel id. We need to put the name "/host<id>"
994 * in the node's create_args, but the host
995 * side channel will not be opened until DSPStream_Open is
996 * called for this node. */
998 if (pattrs->strm_mode == STRMMODE_RDMA) {
1000 gb_findandset(hnode_mgr->dma_chnl_map);
1001 /* dma chans are 2nd transport chnl set
1002 * ids(e.g. 16-31) */
1003 (chnl_id != GB_NOBITS) ?
1006 hnode_mgr->ul_num_chnls) : chnl_id;
1007 } else if (pattrs->strm_mode == STRMMODE_ZEROCOPY) {
1008 chnl_id = gb_findandset(hnode_mgr->zc_chnl_map);
1009 /* zero-copy chans are 3nd transport set
1011 (chnl_id != GB_NOBITS) ? (chnl_id = chnl_id +
1016 } else { /* must be PROCCOPY */
1017 DBC_ASSERT(pattrs->strm_mode ==
1019 chnl_id = gb_findandset(hnode_mgr->chnl_map);
1023 /* default to PROCCOPY */
1024 chnl_id = gb_findandset(hnode_mgr->chnl_map);
1026 if (chnl_id == GB_NOBITS) {
1027 status = -ECONNREFUSED;
1030 pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL);
1031 if (pstr_dev_name != NULL)
/* Allocation failed: release whichever bitmap bit was taken (the
 * offset added for RDMA/ZEROCOPY ids is subtracted back out). */
1035 if (pattrs->strm_mode == STRMMODE_RDMA) {
1036 gb_clear(hnode_mgr->dma_chnl_map, chnl_id -
1037 hnode_mgr->ul_num_chnls);
1038 } else if (pattrs->strm_mode == STRMMODE_ZEROCOPY) {
1039 gb_clear(hnode_mgr->zc_chnl_map, chnl_id -
1040 (2 * hnode_mgr->ul_num_chnls));
1042 DBC_ASSERT(pattrs->strm_mode ==
1044 gb_clear(hnode_mgr->chnl_map, chnl_id);
1047 gb_clear(hnode_mgr->chnl_map, chnl_id);
1052 if (node1 == (struct node_object *)DSP_HGPPNODE) {
1053 node2->inputs[stream2].type = HOSTCONNECT;
1054 node2->inputs[stream2].dev_id = chnl_id;
1055 input->sz_device = pstr_dev_name;
1057 node1->outputs[stream1].type = HOSTCONNECT;
1058 node1->outputs[stream1].dev_id = chnl_id;
1059 output->sz_device = pstr_dev_name;
1061 sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
1064 /* Connecting task node to device node? */
1065 if (!status && ((node1_type == NODE_DEVICE) ||
1066 (node2_type == NODE_DEVICE))) {
1067 if (node2_type == NODE_DEVICE) {
1068 /* node1 == > device */
1069 dev_node_obj = node2;
1071 pstream = &(node1->outputs[stream1]);
1074 /* device == > node2 */
1075 dev_node_obj = node1;
1077 pstream = &(node2->inputs[stream2]);
1080 /* Set up create args */
1081 pstream->type = DEVICECONNECT;
1082 dw_length = strlen(dev_node_obj->pstr_dev_name);
1083 if (conn_param != NULL) {
1084 pstrm_def->sz_device = kzalloc(dw_length + 1 +
1085 conn_param->cb_data,
1088 pstrm_def->sz_device = kzalloc(dw_length + 1,
1091 if (pstrm_def->sz_device == NULL) {
1094 /* Copy device name */
1095 strncpy(pstrm_def->sz_device,
1096 dev_node_obj->pstr_dev_name, dw_length);
1097 if (conn_param != NULL) {
1098 strncat(pstrm_def->sz_device,
1099 (char *)conn_param->node_data,
1100 (u32) conn_param->cb_data);
1102 dev_node_obj->device_owner = hnode;
1106 /* Fill in create args */
1107 if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
1108 node1->create_args.asa.task_arg_obj.num_outputs++;
1109 fill_stream_def(node1, output, pattrs);
1111 if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
1112 node2->create_args.asa.task_arg_obj.num_inputs++;
1113 fill_stream_def(node2, input, pattrs);
1115 /* Update node1 and node2 stream_connect */
1116 if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) {
1117 node1->num_outputs++;
1118 if (stream1 > node1->max_output_index)
1119 node1->max_output_index = stream1;
1122 if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) {
1123 node2->num_inputs++;
1124 if (stream2 > node2->max_input_index)
1125 node2->max_input_index = stream2;
1128 fill_stream_connect(node1, node2, stream1, stream2);
1130 /* end of sync_enter_cs */
1131 /* Exit critical section */
1132 mutex_unlock(&hnode_mgr->node_mgr_lock);
1134 dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d"
1135 "pattrs: %p status: 0x%x\n", __func__, node1,
1136 stream1, node2, stream2, pattrs, status);
1141 * ======== node_create ========
1143 * Create a node on the DSP by remotely calling the node's create function.
/*
 * ======== node_create ========
 * Create an ALLOCATED node on the DSP: dynamically load its create
 * phase, resolve the create-function (and DAIS iAlg) addresses,
 * remote-call the RMS create function via the dispatcher, attach the
 * node's message queue to the returned environment, then unload the
 * create-phase code if phases are split. On success the node moves to
 * NODE_CREATED; on most errors it is put back to NODE_ALLOCATED.
 * Under CONFIG_TIDSPBRIDGE_DVFS (without CONFIG_CPU_FREQ) the OPP is
 * boosted for the load and dropped back afterwards.
 * NOTE(review): sampled excerpt — #endif lines, error-path statements
 * and closing braces are missing from this dump.
 */
1145 int node_create(struct node_object *hnode)
1147 struct node_object *pnode = (struct node_object *)hnode;
1148 struct node_mgr *hnode_mgr;
1149 struct bridge_drv_interface *intf_fxns;
1151 enum node_type node_type;
1154 struct dsp_cbdata cb_data;
1156 struct dsp_processorstate proc_state;
1157 struct proc_object *hprocessor;
1158 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1159 struct dspbridge_platform_data *pdata =
1160 omap_dspbridge_dev->dev.platform_data;
1163 DBC_REQUIRE(refs > 0);
1168 hprocessor = hnode->hprocessor;
1169 status = proc_get_state(hprocessor, &proc_state,
1170 sizeof(struct dsp_processorstate));
1173 /* If processor is in error state then don't attempt to create
1175 if (proc_state.proc_state == PROC_ERROR) {
1179 /* create struct dsp_cbdata struct for PWR calls */
1180 cb_data.cb_data = PWR_TIMEOUT;
1181 node_type = node_get_type(hnode);
1182 hnode_mgr = hnode->hnode_mgr;
1183 intf_fxns = hnode_mgr->intf_fxns;
1184 /* Get access to node dispatcher */
1185 mutex_lock(&hnode_mgr->node_mgr_lock);
1187 /* Check node state */
1188 if (node_get_state(hnode) != NODE_ALLOCATED)
1192 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
1197 if (proc_id != DSP_UNIT)
1200 /* Make sure streams are properly connected */
1201 if ((hnode->num_inputs && hnode->max_input_index >
1202 hnode->num_inputs - 1) ||
1203 (hnode->num_outputs && hnode->max_output_index >
1204 hnode->num_outputs - 1))
1208 /* If node's create function is not loaded, load it */
1209 /* Boost the OPP level to max level that DSP can be requested */
1210 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1211 if (pdata->cpu_set_freq)
1212 (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]);
1214 status = hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj,
1216 /* Get address of node's create function */
1218 hnode->loaded = true;
1219 if (node_type != NODE_DEVICE) {
1220 status = get_fxn_address(hnode, &ul_create_fxn,
1224 pr_err("%s: failed to load create code: 0x%x\n",
1227 /* Request the lowest OPP level */
1228 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1229 if (pdata->cpu_set_freq)
1230 (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
1232 /* Get address of iAlg functions, if socket node */
1234 if (node_type == NODE_DAISSOCKET) {
1235 status = hnode_mgr->nldr_fxns.pfn_get_fxn_addr
1236 (hnode->nldr_node_obj,
1237 hnode->dcd_props.obj_data.node_obj.
1239 &hnode->create_args.asa.
1240 task_arg_obj.ul_dais_arg);
/* Remote-call the RMS create function on the DSP; node_env comes
 * back as the node's DSP-side environment handle. */
1245 if (node_type != NODE_DEVICE) {
1246 status = disp_node_create(hnode_mgr->disp_obj, hnode,
1247 hnode_mgr->ul_fxn_addrs
1250 &(hnode->create_args),
1251 &(hnode->node_env));
1253 /* Set the message queue id to the node env
1255 intf_fxns = hnode_mgr->intf_fxns;
1256 (*intf_fxns->pfn_msg_set_queue_id) (hnode->
1262 /* Phase II/Overlays: Create, execute, delete phases possibly in
1263 * different files/sections. */
1264 if (hnode->loaded && hnode->phase_split) {
1265 /* If create code was dynamically loaded, we can now unload
1267 status1 = hnode_mgr->nldr_fxns.pfn_unload(hnode->nldr_node_obj,
1269 hnode->loaded = false;
1272 pr_err("%s: Failed to unload create code: 0x%x\n",
1275 /* Update node state and node manager state */
1277 NODE_SET_STATE(hnode, NODE_CREATED);
1278 hnode_mgr->num_created++;
1281 if (status != -EBADR) {
1282 /* Put back in NODE_ALLOCATED state if error occurred */
1283 NODE_SET_STATE(hnode, NODE_ALLOCATED);
1286 /* Free access to node dispatcher */
1287 mutex_unlock(&hnode_mgr->node_mgr_lock);
1290 proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
1291 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
1294 dev_dbg(bridge, "%s: hnode: %p status: 0x%x\n", __func__,
1300 * ======== node_create_mgr ========
1302 * Create a NODE Manager object.
/*
 * Builds a node_mgr for the device: node list, pipe bitmaps, notify
 * object, DCD manager, node dispatcher, STRM manager, channel bitmaps
 * and the dynamic loader.  On failure the partially constructed
 * manager is torn down via delete_node_mgr().
 * NOTE(review): this extract omits intermediate source lines (error
 * checks, braces, returns); do not assume the visible statements are
 * consecutive.
 */
1304 int node_create_mgr(struct node_mgr **node_man,
1305 struct dev_object *hdev_obj)
1308 struct node_mgr *node_mgr_obj = NULL;
1309 struct disp_attr disp_attr_obj;
1310 char *sz_zl_file = "";
1311 struct nldr_attrs nldr_attrs_obj;
1314 DBC_REQUIRE(refs > 0);
1315 DBC_REQUIRE(node_man != NULL);
1316 DBC_REQUIRE(hdev_obj != NULL);
1319 /* Allocate Node manager object */
1320 node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
1322 node_mgr_obj->hdev_obj = hdev_obj;
1323 node_mgr_obj->node_list = kzalloc(sizeof(struct lst_list),
1325 node_mgr_obj->pipe_map = gb_create(MAXPIPES);
1326 node_mgr_obj->pipe_done_map = gb_create(MAXPIPES);
1327 if (node_mgr_obj->node_list == NULL
1328 || node_mgr_obj->pipe_map == NULL
1329 || node_mgr_obj->pipe_done_map == NULL) {
1332 INIT_LIST_HEAD(&node_mgr_obj->node_list->head);
/* Notification object is optional: creation continues if kmalloc fails */
1333 node_mgr_obj->ntfy_obj = kmalloc(
1334 sizeof(struct ntfy_object), GFP_KERNEL);
1335 if (node_mgr_obj->ntfy_obj)
1336 ntfy_init(node_mgr_obj->ntfy_obj);
1340 node_mgr_obj->num_created = 0;
1344 /* get devNodeType */
1346 status = dev_get_dev_type(hdev_obj, &dev_type);
1348 /* Create the DCD Manager */
1351 dcd_create_manager(sz_zl_file, &node_mgr_obj->hdcd_mgr);
1353 status = get_proc_props(node_mgr_obj, hdev_obj);
1356 /* Create NODE Dispatcher */
1358 disp_attr_obj.ul_chnl_offset = node_mgr_obj->ul_chnl_offset;
1359 disp_attr_obj.ul_chnl_buf_size = node_mgr_obj->ul_chnl_buf_size;
1360 disp_attr_obj.proc_family = node_mgr_obj->proc_family;
1361 disp_attr_obj.proc_type = node_mgr_obj->proc_type;
1363 disp_create(&node_mgr_obj->disp_obj, hdev_obj,
1366 /* Create a STRM Manager */
1368 status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);
1371 dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
1372 /* Get msg_ctrl queue manager */
1373 dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
1374 mutex_init(&node_mgr_obj->node_mgr_lock);
1375 node_mgr_obj->chnl_map = gb_create(node_mgr_obj->ul_num_chnls);
1376 /* dma chnl map. ul_num_chnls is # per transport */
1377 node_mgr_obj->dma_chnl_map =
1378 gb_create(node_mgr_obj->ul_num_chnls);
1379 node_mgr_obj->zc_chnl_map =
1380 gb_create(node_mgr_obj->ul_num_chnls);
1381 if ((node_mgr_obj->chnl_map == NULL)
1382 || (node_mgr_obj->dma_chnl_map == NULL)
1383 || (node_mgr_obj->zc_chnl_map == NULL)) {
1386 /* Block out reserved channels */
1387 for (i = 0; i < node_mgr_obj->ul_chnl_offset; i++)
1388 gb_set(node_mgr_obj->chnl_map, i);
1390 /* Block out channels reserved for RMS */
1391 gb_set(node_mgr_obj->chnl_map,
1392 node_mgr_obj->ul_chnl_offset);
1393 gb_set(node_mgr_obj->chnl_map,
1394 node_mgr_obj->ul_chnl_offset + 1);
1398 /* NO RM Server on the IVA */
1399 if (dev_type != IVA_UNIT) {
1400 /* Get addresses of any RMS functions loaded */
1401 status = get_rms_fxns(node_mgr_obj);
1405 /* Get loader functions and create loader */
1407 node_mgr_obj->nldr_fxns = nldr_fxns; /* Dyn loader funcs */
1410 nldr_attrs_obj.pfn_ovly = ovly;
1411 nldr_attrs_obj.pfn_write = mem_write;
1412 nldr_attrs_obj.us_dsp_word_size = node_mgr_obj->udsp_word_size;
1413 nldr_attrs_obj.us_dsp_mau_size = node_mgr_obj->udsp_mau_size;
1414 node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.pfn_init();
1416 node_mgr_obj->nldr_fxns.pfn_create(&node_mgr_obj->nldr_obj,
/* Success: hand the manager to the caller; failure path deletes it */
1421 *node_man = node_mgr_obj;
1423 delete_node_mgr(node_mgr_obj);
1425 DBC_ENSURE((status && *node_man == NULL) || (!status && *node_man));
1431 * ======== node_delete ========
1433 * Delete a node on the DSP by remotely calling the node's delete function.
1434 * Loads the node's delete function if necessary. Free GPP side resources
1435 * after node's delete function returns.
/*
 * NOTE(review): extract is gappy — error-path braces/returns between the
 * visible statements are not shown here.
 */
1437 int node_delete(struct node_res_object *noderes,
1438 struct process_context *pr_ctxt)
1440 struct node_object *pnode = noderes->hnode;
1441 struct node_mgr *hnode_mgr;
1442 struct proc_object *hprocessor;
1443 struct disp_object *disp_obj;
1445 enum node_type node_type;
1446 enum node_state state;
1449 struct dsp_cbdata cb_data;
1451 struct bridge_drv_interface *intf_fxns;
1453 void *node_res = noderes;
1455 struct dsp_processorstate proc_state;
1456 DBC_REQUIRE(refs > 0);
1462 /* create struct dsp_cbdata struct for PWR call */
1463 cb_data.cb_data = PWR_TIMEOUT;
1464 hnode_mgr = pnode->hnode_mgr;
1465 hprocessor = pnode->hprocessor;
1466 disp_obj = hnode_mgr->disp_obj;
1467 node_type = node_get_type(pnode);
1468 intf_fxns = hnode_mgr->intf_fxns;
1469 /* Enter critical section */
1470 mutex_lock(&hnode_mgr->node_mgr_lock);
1472 state = node_get_state(pnode);
1473 /* Execute delete phase code for non-device node in all cases
1474 * except when the node was only allocated. Delete phase must be
1475 * executed even if create phase was executed, but failed.
1476 * If the node environment pointer is non-NULL, the delete phase
1477 * code must be executed. */
1478 if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) &&
1479 node_type != NODE_DEVICE) {
1480 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
1484 if (proc_id == DSP_UNIT || proc_id == IVA_UNIT) {
1485 /* If node has terminated, execute phase code will
1486 * have already been unloaded in node_on_exit(). If the
1487 * node is PAUSED, the execute phase is loaded, and it
1488 * is now ok to unload it. If the node is running, we
1489 * will unload the execute phase only after deleting
1491 if (state == NODE_PAUSED && pnode->loaded &&
1492 pnode->phase_split) {
1493 /* Ok to unload execute code as long as node
1494 * is not * running */
1496 hnode_mgr->nldr_fxns.
1497 pfn_unload(pnode->nldr_node_obj,
1499 pnode->loaded = false;
1500 NODE_SET_STATE(pnode, NODE_DONE);
1502 /* Load delete phase code if not loaded or if haven't
1503 * * unloaded EXECUTE phase */
1504 if ((!(pnode->loaded) || (state == NODE_RUNNING)) &&
1505 pnode->phase_split) {
1507 hnode_mgr->nldr_fxns.
1508 pfn_load(pnode->nldr_node_obj, NLDR_DELETE);
1510 pnode->loaded = true;
1512 pr_err("%s: fail - load delete code:"
1513 " 0x%x\n", __func__, status);
1518 /* Unblock a thread trying to terminate the node */
1519 (void)sync_set_event(pnode->sync_done);
1520 if (proc_id == DSP_UNIT) {
1521 /* ul_delete_fxn = address of node's delete
1523 status = get_fxn_address(pnode, &ul_delete_fxn,
1525 } else if (proc_id == IVA_UNIT)
1526 ul_delete_fxn = (u32) pnode->node_env;
1528 status = proc_get_state(hprocessor,
1531 dsp_processorstate));
/* Skip the remote delete call if the DSP is in an error state */
1532 if (proc_state.proc_state != PROC_ERROR) {
1534 disp_node_delete(disp_obj, pnode,
1541 NODE_SET_STATE(pnode, NODE_DONE);
1543 /* Unload execute, if not unloaded, and delete
1545 if (state == NODE_RUNNING &&
1546 pnode->phase_split) {
1548 hnode_mgr->nldr_fxns.
1549 pfn_unload(pnode->nldr_node_obj,
1553 pr_err("%s: fail - unload execute code:"
1554 " 0x%x\n", __func__, status1);
1557 hnode_mgr->nldr_fxns.pfn_unload(pnode->
1560 pnode->loaded = false;
1562 pr_err("%s: fail - unload delete code: "
1563 "0x%x\n", __func__, status1);
1567 /* Free host side resources even if a failure occurred */
1568 /* Remove node from hnode_mgr->node_list */
1569 lst_remove_elem(hnode_mgr->node_list, (struct list_head *)pnode);
1570 hnode_mgr->num_nodes--;
1571 /* Decrement count of nodes created on DSP */
1572 if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) &&
1573 (pnode->node_env != (u32) NULL)))
1574 hnode_mgr->num_created--;
1575 /* Free host-side resources allocated by node_create()
1576 * delete_node() fails if SM buffers not freed by client! */
1577 drv_proc_node_update_status(node_res, false);
1578 delete_node(pnode, pr_ctxt);
1581 * Release all Node resources and its context
1583 idr_remove(pr_ctxt->node_id, ((struct node_res_object *)node_res)->id);
1586 /* Exit critical section */
1587 mutex_unlock(&hnode_mgr->node_mgr_lock);
1588 proc_notify_clients(hprocessor, DSP_NODESTATECHANGE);
1590 dev_dbg(bridge, "%s: pnode: %p status 0x%x\n", __func__, pnode, status);
1595 * ======== node_delete_mgr ========
1597 * Delete the NODE Manager.
/* Thin public wrapper: all teardown work is done by static delete_node_mgr(). */
1599 int node_delete_mgr(struct node_mgr *hnode_mgr)
1603 DBC_REQUIRE(refs > 0);
1606 delete_node_mgr(hnode_mgr);
1614 * ======== node_enum_nodes ========
1616 * Enumerate currently allocated nodes.
/*
 * Copies up to node_tab_size node handles into node_tab under the
 * manager lock.  If more nodes exist than fit in the table, only
 * *pu_allocated is set to the total count (the early-out path between
 * lines 1638 and 1642 is not shown in this extract).
 */
1618 int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
1619 u32 node_tab_size, u32 *pu_num_nodes,
1622 struct node_object *hnode;
1625 DBC_REQUIRE(refs > 0);
1626 DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
1627 DBC_REQUIRE(pu_num_nodes != NULL);
1628 DBC_REQUIRE(pu_allocated != NULL);
1634 /* Enter critical section */
1635 mutex_lock(&hnode_mgr->node_mgr_lock);
1637 if (hnode_mgr->num_nodes > node_tab_size) {
1638 *pu_allocated = hnode_mgr->num_nodes;
1642 hnode = (struct node_object *)lst_first(hnode_mgr->
1644 for (i = 0; i < hnode_mgr->num_nodes; i++) {
1646 node_tab[i] = hnode;
1647 hnode = (struct node_object *)lst_next
1648 (hnode_mgr->node_list,
1649 (struct list_head *)hnode);
1651 *pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes;
1653 /* end of sync_enter_cs */
1654 /* Exit critical section */
1655 mutex_unlock(&hnode_mgr->node_mgr_lock);
1661 * ======== node_exit ========
1663 * Discontinue usage of NODE module.
/* Presumably decrements the module ref count (the decrement itself is
 * not visible in this extract) — verify against the full source. */
1665 void node_exit(void)
1667 DBC_REQUIRE(refs > 0);
1671 DBC_ENSURE(refs >= 0);
1675 * ======== node_free_msg_buf ========
1677 * Frees the message buffer.
/*
 * Frees a shared-memory message buffer previously handed to the client.
 * Only the DSP_UNIT processor path is supported; the node supports a
 * single SM segment (segment_id must be 1).
 */
1679 int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
1680 struct dsp_bufferattr *pattr)
1682 struct node_object *pnode = (struct node_object *)hnode;
1685 DBC_REQUIRE(refs > 0);
1686 DBC_REQUIRE(pbuffer != NULL);
1687 DBC_REQUIRE(pnode != NULL);
1688 DBC_REQUIRE(pnode->xlator != NULL);
1694 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
1695 if (proc_id == DSP_UNIT) {
1697 if (pattr == NULL) {
/* No attributes supplied: fall back to the module defaults */
1699 pattr = &node_dfltbufattrs;
1701 /* Node supports single SM segment only */
1702 if (pattr->segment_id != 1)
1705 /* pbuffer is clients Va. */
1706 status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
/* Reaching here with a non-DSP processor id is considered a bug */
1709 DBC_ASSERT(NULL); /* BUG */
1716 * ======== node_get_attr ========
1718 * Copy the current attributes of the specified node into a dsp_nodeattr
/*
 * Snapshot of the node's attributes is taken under the manager lock so
 * that priority/connection changes cannot race with the copy.
 */
1721 int node_get_attr(struct node_object *hnode,
1722 struct dsp_nodeattr *pattr, u32 attr_size)
1724 struct node_mgr *hnode_mgr;
1726 DBC_REQUIRE(refs > 0);
1727 DBC_REQUIRE(pattr != NULL);
1728 DBC_REQUIRE(attr_size >= sizeof(struct dsp_nodeattr));
1733 hnode_mgr = hnode->hnode_mgr;
1734 /* Enter hnode_mgr critical section (since we're accessing
1735 * data that could be changed by node_change_priority() and
1736 * node_connect(). */
1737 mutex_lock(&hnode_mgr->node_mgr_lock);
1738 pattr->cb_struct = sizeof(struct dsp_nodeattr);
1739 /* dsp_nodeattrin */
1740 pattr->in_node_attr_in.cb_struct =
1741 sizeof(struct dsp_nodeattrin);
1742 pattr->in_node_attr_in.prio = hnode->prio;
1743 pattr->in_node_attr_in.utimeout = hnode->utimeout;
1744 pattr->in_node_attr_in.heap_size =
1745 hnode->create_args.asa.task_arg_obj.heap_size;
1746 pattr->in_node_attr_in.pgpp_virt_addr = (void *)
1747 hnode->create_args.asa.task_arg_obj.ugpp_heap_addr;
1748 pattr->node_attr_inputs = hnode->num_gpp_inputs;
1749 pattr->node_attr_outputs = hnode->num_gpp_outputs;
1751 get_node_info(hnode, &(pattr->node_info));
1752 /* end of sync_enter_cs */
1753 /* Exit critical section */
1754 mutex_unlock(&hnode_mgr->node_mgr_lock);
1760 * ======== node_get_channel_id ========
1762 * Get the channel index reserved for a stream connection between the
/*
 * Looks up the host-connect channel id for the given stream index in
 * the requested direction.  Only TASK and DAIS-socket nodes have
 * streams; status starts at -EINVAL and is presumably set to 0 on a
 * successful lookup (success path not visible in this extract).
 */
1765 int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
1768 enum node_type node_type;
1769 int status = -EINVAL;
1770 DBC_REQUIRE(refs > 0);
1771 DBC_REQUIRE(dir == DSP_TONODE || dir == DSP_FROMNODE);
1772 DBC_REQUIRE(chan_id != NULL);
1778 node_type = node_get_type(hnode);
1779 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) {
1783 if (dir == DSP_TONODE) {
1784 if (index < MAX_INPUTS(hnode)) {
1785 if (hnode->inputs[index].type == HOSTCONNECT) {
1786 *chan_id = hnode->inputs[index].dev_id;
/* dir was validated above, so anything else must be DSP_FROMNODE */
1791 DBC_ASSERT(dir == DSP_FROMNODE);
1792 if (index < MAX_OUTPUTS(hnode)) {
1793 if (hnode->outputs[index].type == HOSTCONNECT) {
1794 *chan_id = hnode->outputs[index].dev_id;
1803 * ======== node_get_message ========
1805 * Retrieve a message from a node on the DSP.
/*
 * Blocks until a message is available (or utimeout expires), then, if
 * the message carries a shared-memory buffer descriptor, translates the
 * DSP address in dw_arg1 to a GPP virtual address and scales dw_arg2
 * from DSP words to bytes.
 */
1807 int node_get_message(struct node_object *hnode,
1808 struct dsp_msg *message, u32 utimeout)
1810 struct node_mgr *hnode_mgr;
1811 enum node_type node_type;
1812 struct bridge_drv_interface *intf_fxns;
1815 struct dsp_processorstate proc_state;
1816 struct proc_object *hprocessor;
1818 DBC_REQUIRE(refs > 0);
1819 DBC_REQUIRE(message != NULL);
1825 hprocessor = hnode->hprocessor;
1826 status = proc_get_state(hprocessor, &proc_state,
1827 sizeof(struct dsp_processorstate));
1830 /* If processor is in error state then don't attempt to get the
1832 if (proc_state.proc_state == PROC_ERROR) {
1836 hnode_mgr = hnode->hnode_mgr;
1837 node_type = node_get_type(hnode);
/* Only message, task, and DAIS-socket nodes can receive messages */
1838 if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
1839 node_type != NODE_DAISSOCKET) {
1843 /* This function will block unless a message is available. Since
1844 * DSPNode_RegisterNotify() allows notification when a message
1845 * is available, the system can be designed so that
1846 * DSPNode_GetMessage() is only called when a message is
1848 intf_fxns = hnode_mgr->intf_fxns;
1850 (*intf_fxns->pfn_msg_get) (hnode->msg_queue_obj, message, utimeout);
1851 /* Check if message contains SM descriptor */
1852 if (status || !(message->dw_cmd & DSP_RMSBUFDESC))
1855 /* Translate DSP byte addr to GPP Va. */
1856 tmp_buf = cmm_xlator_translate(hnode->xlator,
1857 (void *)(message->dw_arg1 *
1859 udsp_word_size), CMM_DSPPA2PA);
1860 if (tmp_buf != NULL) {
1861 /* now convert this GPP Pa to Va */
1862 tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf,
1864 if (tmp_buf != NULL) {
1865 /* Adjust SM size in msg */
1866 message->dw_arg1 = (u32) tmp_buf;
1867 message->dw_arg2 *= hnode->hnode_mgr->udsp_word_size;
1875 dev_dbg(bridge, "%s: hnode: %p message: %p utimeout: 0x%x\n", __func__,
1876 hnode, message, utimeout);
1881 * ======== node_get_nldr_obj ========
/* Returns the node manager's dynamic-loader object via *nldr_ovlyobj. */
1883 int node_get_nldr_obj(struct node_mgr *hnode_mgr,
1884 struct nldr_object **nldr_ovlyobj)
1887 struct node_mgr *node_mgr_obj = hnode_mgr;
1888 DBC_REQUIRE(nldr_ovlyobj != NULL);
1893 *nldr_ovlyobj = node_mgr_obj->nldr_obj;
1895 DBC_ENSURE(!status || (nldr_ovlyobj != NULL && *nldr_ovlyobj == NULL));
1900 * ======== node_get_strm_mgr ========
1902 * Returns the Stream manager.
/* Accessor: copies the manager's STRM manager handle into *strm_man. */
1904 int node_get_strm_mgr(struct node_object *hnode,
1905 struct strm_mgr **strm_man)
1909 DBC_REQUIRE(refs > 0);
1914 *strm_man = hnode->hnode_mgr->strm_mgr_obj;
1920 * ======== node_get_load_type ========
/* Returns the node's DCD-registered loader type (static/dynamic/overlay). */
1922 enum nldr_loadtype node_get_load_type(struct node_object *hnode)
1924 DBC_REQUIRE(refs > 0);
1927 dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
1930 return hnode->dcd_props.obj_data.node_obj.us_load_type;
1935 * ======== node_get_timeout ========
1937 * Returns the timeout value for this node.
/* Simple accessor for hnode->utimeout; logs on the (invisible here)
 * bad-handle path. */
1939 u32 node_get_timeout(struct node_object *hnode)
1941 DBC_REQUIRE(refs > 0);
1944 dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
1947 return hnode->utimeout;
1952 * ======== node_get_type ========
1954 * Returns the node type.
/* The special DSP_HGPPNODE handle maps to NODE_GPP; otherwise the type
 * stored on the node object is returned. */
1956 enum node_type node_get_type(struct node_object *hnode)
1958 enum node_type node_type;
1960 if (hnode == (struct node_object *)DSP_HGPPNODE)
1961 node_type = NODE_GPP;
1966 node_type = hnode->ntype;
1972 * ======== node_init ========
1974 * Initialize the NODE module.
/* Presumably increments the module ref count and returns true (the
 * increment/return lines are not visible in this extract). */
1976 bool node_init(void)
1978 DBC_REQUIRE(refs >= 0);
1986 * ======== node_on_exit ========
1988 * Gets called when RMS_EXIT is received for a node.
/*
 * Marks the node DONE, records its exit status, unloads the execute
 * phase (if split-phase and loaded), wakes any node_terminate() waiter,
 * and notifies registered clients of the state change.
 */
1990 void node_on_exit(struct node_object *hnode, s32 node_status)
1995 /* Set node state to done */
1996 NODE_SET_STATE(hnode, NODE_DONE);
1997 hnode->exit_status = node_status;
1998 if (hnode->loaded && hnode->phase_split) {
1999 (void)hnode->hnode_mgr->nldr_fxns.pfn_unload(hnode->
2002 hnode->loaded = false;
2004 /* Unblock call to node_terminate */
2005 (void)sync_set_event(hnode->sync_done);
2006 /* Notify clients */
2007 proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
2008 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
2012 * ======== node_pause ========
2014 * Suspend execution of a node currently running on the DSP.
/*
 * Pauses a RUNNING task/DAIS-socket node by asking the dispatcher to
 * drop its priority to NODE_SUSPENDEDPRI, then sets state to PAUSED.
 * Not supported on the IVA unit.  Clients are notified on success.
 */
2016 int node_pause(struct node_object *hnode)
2018 struct node_object *pnode = (struct node_object *)hnode;
2019 enum node_type node_type;
2020 enum node_state state;
2021 struct node_mgr *hnode_mgr;
2024 struct dsp_processorstate proc_state;
2025 struct proc_object *hprocessor;
2027 DBC_REQUIRE(refs > 0);
2032 node_type = node_get_type(hnode);
2033 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
2039 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
2041 if (proc_id == IVA_UNIT)
2045 hnode_mgr = hnode->hnode_mgr;
2047 /* Enter critical section */
2048 mutex_lock(&hnode_mgr->node_mgr_lock);
2049 state = node_get_state(hnode);
2050 /* Check node state */
2051 if (state != NODE_RUNNING)
2056 hprocessor = hnode->hprocessor;
2057 status = proc_get_state(hprocessor, &proc_state,
2058 sizeof(struct dsp_processorstate));
2061 /* If processor is in error state then don't attempt
2062 to send the message */
2063 if (proc_state.proc_state == PROC_ERROR) {
2068 status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
2069 hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY],
2070 hnode->node_env, NODE_SUSPENDEDPRI);
2074 NODE_SET_STATE(hnode, NODE_PAUSED);
2077 /* End of sync_enter_cs */
2078 /* Leave critical section */
2079 mutex_unlock(&hnode_mgr->node_mgr_lock);
2081 proc_notify_clients(hnode->hprocessor,
2082 DSP_NODESTATECHANGE);
2083 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
2087 dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
2092 * ======== node_put_message ========
2094 * Send a message to a message node, task node, or XDAIS socket node. This
2095 * function will block until the message stream can accommodate the
2096 * message, or a timeout occurs.
/*
 * If the message carries a shared-memory descriptor, the GPP virtual
 * address in dw_arg1 is translated to a DSP address and dw_arg1/dw_arg2
 * are converted to DSP MAUs before the message is queued.
 */
2098 int node_put_message(struct node_object *hnode,
2099 const struct dsp_msg *pmsg, u32 utimeout)
2101 struct node_mgr *hnode_mgr = NULL;
2102 enum node_type node_type;
2103 struct bridge_drv_interface *intf_fxns;
2104 enum node_state state;
2107 struct dsp_msg new_msg;
2108 struct dsp_processorstate proc_state;
2109 struct proc_object *hprocessor;
2111 DBC_REQUIRE(refs > 0);
2112 DBC_REQUIRE(pmsg != NULL);
2118 hprocessor = hnode->hprocessor;
2119 status = proc_get_state(hprocessor, &proc_state,
2120 sizeof(struct dsp_processorstate));
2123 /* If processor is in bad state then don't attempt sending the
2125 if (proc_state.proc_state == PROC_ERROR) {
2129 hnode_mgr = hnode->hnode_mgr;
2130 node_type = node_get_type(hnode);
2131 if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
2132 node_type != NODE_DAISSOCKET)
2136 /* Check node state. Can't send messages to a node after
2137 * we've sent the RMS_EXIT command. There is still the
2138 * possibility that node_terminate can be called after we've
2139 * checked the state. Could add another SYNC object to
2140 * prevent this (can't use node_mgr_lock, since we don't
2141 * want to block other NODE functions). However, the node may
2142 * still exit on its own, before this message is sent. */
2143 mutex_lock(&hnode_mgr->node_mgr_lock);
2144 state = node_get_state(hnode);
2145 if (state == NODE_TERMINATING || state == NODE_DONE)
2148 /* end of sync_enter_cs */
2149 mutex_unlock(&hnode_mgr->node_mgr_lock);
2154 /* assign pmsg values to new msg */
2156 /* Now, check if message contains a SM buffer descriptor */
2157 if (pmsg->dw_cmd & DSP_RMSBUFDESC) {
2158 /* Translate GPP Va to DSP physical buf Ptr. */
2159 tmp_buf = cmm_xlator_translate(hnode->xlator,
2160 (void *)new_msg.dw_arg1,
2162 if (tmp_buf != NULL) {
2163 /* got translation, convert to MAUs in msg */
2164 if (hnode->hnode_mgr->udsp_word_size != 0) {
2167 hnode->hnode_mgr->udsp_word_size;
2169 new_msg.dw_arg2 /= hnode->hnode_mgr->
/* A zero DSP word size would mean a divide-by-zero; reject it */
2172 pr_err("%s: udsp_word_size is zero!\n",
2174 status = -EPERM; /* bad DSPWordSize */
2176 } else { /* failed to translate buffer address */
2181 intf_fxns = hnode_mgr->intf_fxns;
2182 status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj,
2183 &new_msg, utimeout);
2186 dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x, "
2187 "status 0x%x\n", __func__, hnode, pmsg, utimeout, status);
2192 * ======== node_register_notify ========
2194 * Register to be notified on specific events for this node.
/*
 * Valid events are DSP_NODESTATECHANGE (handled by the node's ntfy
 * object) and DSP_NODEMESSAGEREADY (delegated to the msg_ctrl layer);
 * registering for both at once is not supported.
 */
2196 int node_register_notify(struct node_object *hnode, u32 event_mask,
2198 struct dsp_notification *hnotification)
2200 struct bridge_drv_interface *intf_fxns;
2203 DBC_REQUIRE(refs > 0);
2204 DBC_REQUIRE(hnotification != NULL);
2209 /* Check if event mask is a valid node related event */
2210 if (event_mask & ~(DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
2213 /* Check if notify type is valid */
2214 if (notify_type != DSP_SIGNALEVENT)
2217 /* Only one Notification can be registered at a
2218 * time - Limitation */
2219 if (event_mask == (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
2223 if (event_mask == DSP_NODESTATECHANGE) {
2224 status = ntfy_register(hnode->ntfy_obj, hnotification,
2225 event_mask & DSP_NODESTATECHANGE,
2228 /* Send Message part of event mask to msg_ctrl */
2229 intf_fxns = hnode->hnode_mgr->intf_fxns;
2230 status = (*intf_fxns->pfn_msg_register_notify)
2231 (hnode->msg_queue_obj,
2232 event_mask & DSP_NODEMESSAGEREADY, notify_type,
2237 dev_dbg(bridge, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x "
2238 "hnotification: %p status 0x%x\n", __func__, hnode,
2239 event_mask, notify_type, hnotification, status);
2244 * ======== node_run ========
2246 * Start execution of a node's execute phase, or resume execution of a node
2247 * that has been suspended (via NODE_NodePause()) on the DSP. Load the
2248 * node's execute function if necessary.
/*
 * CREATED nodes: load execute-phase code if needed, resolve the execute
 * function address, and dispatch RMSEXECUTENODE.  PAUSED nodes: restore
 * the node's priority via RMSCHANGENODEPRIORITY.  State becomes RUNNING
 * on success, or is restored on failure.
 */
2250 int node_run(struct node_object *hnode)
2252 struct node_object *pnode = (struct node_object *)hnode;
2253 struct node_mgr *hnode_mgr;
2254 enum node_type node_type;
2255 enum node_state state;
2260 struct bridge_drv_interface *intf_fxns;
2261 struct dsp_processorstate proc_state;
2262 struct proc_object *hprocessor;
2264 DBC_REQUIRE(refs > 0);
2270 hprocessor = hnode->hprocessor;
2271 status = proc_get_state(hprocessor, &proc_state,
2272 sizeof(struct dsp_processorstate));
2275 /* If processor is in error state then don't attempt to run the node */
2276 if (proc_state.proc_state == PROC_ERROR) {
2280 node_type = node_get_type(hnode);
2281 if (node_type == NODE_DEVICE)
2286 hnode_mgr = hnode->hnode_mgr;
2291 intf_fxns = hnode_mgr->intf_fxns;
2292 /* Enter critical section */
2293 mutex_lock(&hnode_mgr->node_mgr_lock);
2295 state = node_get_state(hnode);
2296 if (state != NODE_CREATED && state != NODE_PAUSED)
2300 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
2305 if ((proc_id != DSP_UNIT) && (proc_id != IVA_UNIT))
2308 if (state == NODE_CREATED) {
2309 /* If node's execute function is not loaded, load it */
2310 if (!(hnode->loaded) && hnode->phase_split) {
2312 hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj,
2315 hnode->loaded = true;
2317 pr_err("%s: fail - load execute code: 0x%x\n",
2322 /* Get address of node's execute function */
2323 if (proc_id == IVA_UNIT)
2324 ul_execute_fxn = (u32) hnode->node_env;
2326 status = get_fxn_address(hnode, &ul_execute_fxn,
2331 ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSEXECUTENODE];
2333 disp_node_run(hnode_mgr->disp_obj, hnode,
2334 ul_fxn_addr, ul_execute_fxn,
2337 } else if (state == NODE_PAUSED) {
2338 ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY];
2339 status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
2340 ul_fxn_addr, hnode->node_env,
2341 NODE_GET_PRIORITY(hnode));
2343 /* We should never get here */
2347 /* Update node state. */
2349 NODE_SET_STATE(hnode, NODE_RUNNING);
2350 else /* Set state back to previous value */
2351 NODE_SET_STATE(hnode, state);
2352 /*End of sync_enter_cs */
2353 /* Exit critical section */
2354 mutex_unlock(&hnode_mgr->node_mgr_lock);
2356 proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
2357 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
2360 dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
2365 * ======== node_terminate ========
2367 * Signal a node running on the DSP that it should exit its execute phase
/*
 * Sends RMS_EXIT to a RUNNING node and waits (up to 2x the node
 * timeout, capped at MAXTIMEOUT) for the exit callback to signal
 * sync_done.  If that times out, escalates with RMS_KILLTASK, and on a
 * second timeout simulates a DSP exception via bridge_deh_notify().
 * The node's exit status is returned through *pstatus.
 */
2370 int node_terminate(struct node_object *hnode, int *pstatus)
2372 struct node_object *pnode = (struct node_object *)hnode;
2373 struct node_mgr *hnode_mgr = NULL;
2374 enum node_type node_type;
2375 struct bridge_drv_interface *intf_fxns;
2376 enum node_state state;
2377 struct dsp_msg msg, killmsg;
2379 u32 proc_id, kill_time_out;
2380 struct deh_mgr *hdeh_mgr;
2381 struct dsp_processorstate proc_state;
2383 DBC_REQUIRE(refs > 0);
2384 DBC_REQUIRE(pstatus != NULL);
2386 if (!hnode || !hnode->hnode_mgr) {
2390 if (pnode->hprocessor == NULL) {
2394 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
2397 hnode_mgr = hnode->hnode_mgr;
2398 node_type = node_get_type(hnode);
2399 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
2403 /* Check node state */
2404 mutex_lock(&hnode_mgr->node_mgr_lock);
2405 state = node_get_state(hnode);
2406 if (state != NODE_RUNNING) {
2408 /* Set the exit status if node terminated on
2410 if (state == NODE_DONE)
2411 *pstatus = hnode->exit_status;
2414 NODE_SET_STATE(hnode, NODE_TERMINATING);
2416 /* end of sync_enter_cs */
2417 mutex_unlock(&hnode_mgr->node_mgr_lock);
2421 * Send exit message. Do not change state to NODE_DONE
2422 * here. That will be done in callback.
2424 status = proc_get_state(pnode->hprocessor, &proc_state,
2425 sizeof(struct dsp_processorstate));
2428 /* If processor is in error state then don't attempt to send
2429 * A kill task command */
2430 if (proc_state.proc_state == PROC_ERROR) {
2435 msg.dw_cmd = RMS_EXIT;
2436 msg.dw_arg1 = hnode->node_env;
2437 killmsg.dw_cmd = RMS_KILLTASK;
2438 killmsg.dw_arg1 = hnode->node_env;
2439 intf_fxns = hnode_mgr->intf_fxns;
/* Wait up to twice the node's timeout for a graceful exit, bounded */
2441 if (hnode->utimeout > MAXTIMEOUT)
2442 kill_time_out = MAXTIMEOUT;
2444 kill_time_out = (hnode->utimeout) * 2;
2446 status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj, &msg,
2452 * Wait on synchronization object that will be
2453 * posted in the callback on receiving RMS_EXIT
2454 * message, or by node_delete. Check for valid hnode,
2455 * in case posted by node_delete().
2457 status = sync_wait_on_event(hnode->sync_done,
2459 if (status != ETIME)
/* Graceful exit timed out: escalate to RMS_KILLTASK */
2462 status = (*intf_fxns->pfn_msg_put)(hnode->msg_queue_obj,
2463 &killmsg, hnode->utimeout);
2466 status = sync_wait_on_event(hnode->sync_done,
2470 * Here it goes the part of the simulation of
2471 * the DSP exception.
2473 dev_get_deh_mgr(hnode_mgr->hdev_obj, &hdeh_mgr);
2477 bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, DSP_EXCEPTIONABORT);
2482 /* Enter CS before getting exit status, in case node was
2484 mutex_lock(&hnode_mgr->node_mgr_lock);
2485 /* Make sure node wasn't deleted while we blocked */
2489 *pstatus = hnode->exit_status;
2490 dev_dbg(bridge, "%s: hnode: %p env 0x%x status 0x%x\n",
2491 __func__, hnode, hnode->node_env, status);
2493 mutex_unlock(&hnode_mgr->node_mgr_lock);
2494 } /*End of sync_enter_cs */
2500 * ======== delete_node ========
2502 * Free GPP resources allocated in node_allocate() or node_connect().
/*
 * Static helper: tears down all host-side state for a node — message
 * queue, stream tables, task-arg stream defs, mapped/reserved DSP heap
 * memory, DCD phase-function strings, SM translator and loader object.
 * Caller holds the node manager lock (called from node_delete()/
 * delete_node_mgr()).
 */
2504 static void delete_node(struct node_object *hnode,
2505 struct process_context *pr_ctxt)
2507 struct node_mgr *hnode_mgr;
2508 struct bridge_drv_interface *intf_fxns;
2510 enum node_type node_type;
2511 struct stream_chnl stream;
2512 struct node_msgargs node_msg_args;
2513 struct node_taskargs task_arg_obj;
2514 #ifdef DSP_DMM_DEBUG
2515 struct dmm_object *dmm_mgr;
2516 struct proc_object *p_proc_object =
2517 (struct proc_object *)hnode->hprocessor;
2522 hnode_mgr = hnode->hnode_mgr;
2526 node_type = node_get_type(hnode);
2527 if (node_type != NODE_DEVICE) {
2528 node_msg_args = hnode->create_args.asa.node_msg_args;
2529 kfree(node_msg_args.pdata);
2531 /* Free msg_ctrl queue */
2532 if (hnode->msg_queue_obj) {
2533 intf_fxns = hnode_mgr->intf_fxns;
2534 (*intf_fxns->pfn_msg_delete_queue) (hnode->
2536 hnode->msg_queue_obj = NULL;
2539 kfree(hnode->sync_done);
2541 /* Free all stream info */
2542 if (hnode->inputs) {
2543 for (i = 0; i < MAX_INPUTS(hnode); i++) {
2544 stream = hnode->inputs[i];
2545 free_stream(hnode_mgr, stream);
2547 kfree(hnode->inputs);
2548 hnode->inputs = NULL;
2550 if (hnode->outputs) {
2551 for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
2552 stream = hnode->outputs[i];
2553 free_stream(hnode_mgr, stream);
2555 kfree(hnode->outputs);
2556 hnode->outputs = NULL;
2558 task_arg_obj = hnode->create_args.asa.task_arg_obj;
2559 if (task_arg_obj.strm_in_def) {
2560 for (i = 0; i < MAX_INPUTS(hnode); i++) {
2561 kfree(task_arg_obj.strm_in_def[i].sz_device);
2562 task_arg_obj.strm_in_def[i].sz_device = NULL;
2564 kfree(task_arg_obj.strm_in_def);
2565 task_arg_obj.strm_in_def = NULL;
2567 if (task_arg_obj.strm_out_def) {
2568 for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
2569 kfree(task_arg_obj.strm_out_def[i].sz_device);
2570 task_arg_obj.strm_out_def[i].sz_device = NULL;
2572 kfree(task_arg_obj.strm_out_def);
2573 task_arg_obj.strm_out_def = NULL;
/* Unmap and un-reserve the node's DSP heap, if one was set up */
2575 if (task_arg_obj.udsp_heap_res_addr) {
2576 status = proc_un_map(hnode->hprocessor, (void *)
2577 task_arg_obj.udsp_heap_addr,
2580 status = proc_un_reserve_memory(hnode->hprocessor,
2585 #ifdef DSP_DMM_DEBUG
2586 status = dmm_get_handle(p_proc_object, &dmm_mgr);
2588 dmm_mem_map_dump(dmm_mgr);
2590 status = DSP_EHANDLE;
2594 if (node_type != NODE_MESSAGE) {
2595 kfree(hnode->stream_connect);
2596 hnode->stream_connect = NULL;
2598 kfree(hnode->pstr_dev_name);
2599 hnode->pstr_dev_name = NULL;
2601 if (hnode->ntfy_obj) {
2602 ntfy_delete(hnode->ntfy_obj);
2603 kfree(hnode->ntfy_obj);
2604 hnode->ntfy_obj = NULL;
2607 /* These were allocated in dcd_get_object_def (via node_allocate) */
2608 kfree(hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn);
2609 hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn = NULL;
2611 kfree(hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn);
2612 hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn = NULL;
2614 kfree(hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn);
2615 hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn = NULL;
2617 kfree(hnode->dcd_props.obj_data.node_obj.pstr_i_alg_name);
2618 hnode->dcd_props.obj_data.node_obj.pstr_i_alg_name = NULL;
2620 /* Free all SM address translator resources */
2621 kfree(hnode->xlator);
2622 kfree(hnode->nldr_node_obj);
2623 hnode->nldr_node_obj = NULL;
2624 hnode->hnode_mgr = NULL;
2632 * ======== delete_node_mgr ========
2634 * Frees the node manager.
/*
 * Tears down a node manager: destroys the DCD manager, force-deletes any
 * nodes still present on the manager's node list, then releases every
 * per-manager resource (mutex, notification object, pipe/channel bitmaps,
 * dispatcher, stream manager, and the node loader).
 * NOTE(review): the trailing kfree() of hnode_mgr itself and the function's
 * closing brace are outside this excerpt - confirm in the full file.
 */
2636 static void delete_node_mgr(struct node_mgr *hnode_mgr)
2638 struct node_object *hnode;
2641 /* Free resources */
2642 if (hnode_mgr->hdcd_mgr)
2643 dcd_destroy_manager(hnode_mgr->hdcd_mgr)
2645 /* Remove any elements remaining in lists */
2646 if (hnode_mgr->node_list) {
/* Drain the list: delete_node() also unlinks the node from node_list. */
2647 while ((hnode = (struct node_object *)
2648 lst_get_head(hnode_mgr->node_list)))
2649 delete_node(hnode, NULL);
2651 DBC_ASSERT(LST_IS_EMPTY(hnode_mgr->node_list));
2652 kfree(hnode_mgr->node_list);
2654 mutex_destroy(&hnode_mgr->node_mgr_lock);
/* ntfy_delete() releases registered notifications; the object itself
 * was separately kzalloc'd, hence the explicit kfree(). */
2655 if (hnode_mgr->ntfy_obj) {
2656 ntfy_delete(hnode_mgr->ntfy_obj);
2657 kfree(hnode_mgr->ntfy_obj);
/* Free the generic-bitmap allocators for pipes and the three channel
 * classes (plain, DMA, zero-copy). */
2660 if (hnode_mgr->pipe_map)
2661 gb_delete(hnode_mgr->pipe_map);
2663 if (hnode_mgr->pipe_done_map)
2664 gb_delete(hnode_mgr->pipe_done_map);
2666 if (hnode_mgr->chnl_map)
2667 gb_delete(hnode_mgr->chnl_map);
2669 if (hnode_mgr->dma_chnl_map)
2670 gb_delete(hnode_mgr->dma_chnl_map);
2672 if (hnode_mgr->zc_chnl_map)
2673 gb_delete(hnode_mgr->zc_chnl_map);
2675 if (hnode_mgr->disp_obj)
2676 disp_delete(hnode_mgr->disp_obj);
2678 if (hnode_mgr->strm_mgr_obj)
2679 strm_delete(hnode_mgr->strm_mgr_obj);
2681 /* Delete the loader */
2682 if (hnode_mgr->nldr_obj)
2683 hnode_mgr->nldr_fxns.pfn_delete(hnode_mgr->nldr_obj);
/* pfn_exit() balances the loader-module init done at manager creation. */
2685 if (hnode_mgr->loader_init)
2686 hnode_mgr->nldr_fxns.pfn_exit();
2693 * ======== fill_stream_connect ========
2695 * Fills stream information.
/*
 * Records a stream connection in the dsp_streamconnect table of each
 * endpoint node.  node1 is the producing side, node2 the consuming side;
 * either may be the special DSP_HGPPNODE handle meaning "the GPP".
 * stream1/stream2 are the per-node stream indices of the two endpoints.
 * Device nodes (NODE_DEVICE) get no entry of their own.
 * NOTE(review): the declaration of strm_index and several braces are not
 * visible in this excerpt.
 */
2697 static void fill_stream_connect(struct node_object *node1,
2698 struct node_object *node2,
2699 u32 stream1, u32 stream2)
2702 struct dsp_streamconnect *strm1 = NULL;
2703 struct dsp_streamconnect *strm2 = NULL;
2704 enum node_type node1_type = NODE_TASK;
2705 enum node_type node2_type = NODE_TASK;
2707 node1_type = node_get_type(node1);
2708 node2_type = node_get_type(node2);
2709 if (node1 != (struct node_object *)DSP_HGPPNODE) {
/* The new connection occupies the last used slot: callers have already
 * incremented num_inputs/num_outputs, hence the "- 1". */
2711 if (node1_type != NODE_DEVICE) {
2712 strm_index = node1->num_inputs +
2713 node1->num_outputs - 1;
2714 strm1 = &(node1->stream_connect[strm_index]);
2715 strm1->cb_struct = sizeof(struct dsp_streamconnect);
2716 strm1->this_node_stream_index = stream1;
2719 if (node2 != (struct node_object *)DSP_HGPPNODE) {
2720 /* NODE == > NODE */
2721 if (node1_type != NODE_DEVICE) {
2722 strm1->connected_node = node2;
2723 strm1->ui_connected_node_id = node2->node_uuid;
2724 strm1->connected_node_stream_index = stream2;
2725 strm1->connect_type = CONNECTTYPE_NODEOUTPUT;
2727 if (node2_type != NODE_DEVICE) {
2728 strm_index = node2->num_inputs +
2729 node2->num_outputs - 1;
2730 strm2 = &(node2->stream_connect[strm_index]);
/* NOTE(review): the "strm2->cb_struct =" left-hand side of this
 * assignment is missing from the excerpt. */
2732 sizeof(struct dsp_streamconnect);
2733 strm2->this_node_stream_index = stream2;
2734 strm2->connected_node = node1;
2735 strm2->ui_connected_node_id = node1->node_uuid;
2736 strm2->connected_node_stream_index = stream1;
2737 strm2->connect_type = CONNECTTYPE_NODEINPUT;
2739 } else if (node1_type != NODE_DEVICE)
2740 strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
/* else: node1 is the GPP, so only node2's table gets an entry. */
2743 DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
2744 strm_index = node2->num_inputs + node2->num_outputs - 1;
2745 strm2 = &(node2->stream_connect[strm_index]);
2746 strm2->cb_struct = sizeof(struct dsp_streamconnect);
2747 strm2->this_node_stream_index = stream2;
2748 strm2->connect_type = CONNECTTYPE_GPPINPUT;
2753 * ======== fill_stream_def ========
2755 * Fills Stream attributes.
/*
 * Populates a node_strmdef either from caller-supplied dsp_strmattr
 * (pattrs != NULL) or from the DEFAULT* constants.  buf_size is converted
 * from bytes to DSP data MAUs by dividing by udsp_data_mau_size.
 * NOTE(review): the "} else {" separating the two branches is missing
 * from this excerpt.
 */
2757 static void fill_stream_def(struct node_object *hnode,
2758 struct node_strmdef *pstrm_def,
2759 struct dsp_strmattr *pattrs)
2761 struct node_mgr *hnode_mgr = hnode->hnode_mgr;
2763 if (pattrs != NULL) {
2764 pstrm_def->num_bufs = pattrs->num_bufs;
2765 pstrm_def->buf_size =
2766 pattrs->buf_size / hnode_mgr->udsp_data_mau_size;
2767 pstrm_def->seg_id = pattrs->seg_id;
2768 pstrm_def->buf_alignment = pattrs->buf_alignment;
2769 pstrm_def->utimeout = pattrs->utimeout;
/* Defaults used when the caller supplied no attributes. */
2771 pstrm_def->num_bufs = DEFAULTNBUFS;
2772 pstrm_def->buf_size =
2773 DEFAULTBUFSIZE / hnode_mgr->udsp_data_mau_size;
2774 pstrm_def->seg_id = DEFAULTSEGID;
2775 pstrm_def->buf_alignment = DEFAULTALIGNMENT;
2776 pstrm_def->utimeout = DEFAULTTIMEOUT;
2781 * ======== free_stream ========
2783 * Updates the channel mask and frees the pipe id.
/*
 * Releases the device id held by one stream endpoint.
 * For NODECONNECT (node-to-node pipe): the pipe id is shared by two
 * nodes, so pipe_done_map tracks whether the peer already freed its end -
 * only the second free clears the pipe from pipe_map.
 * For HOSTCONNECT (GPP stream): the flat dev_id space is partitioned into
 * three consecutive ranges of ul_num_chnls each - plain, DMA, and
 * zero-copy channels - and the matching bitmap bit is cleared.
 */
2785 static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
2787 /* Free up the pipe id unless other node has not yet been deleted. */
2788 if (stream.type == NODECONNECT) {
2789 if (gb_test(hnode_mgr->pipe_done_map, stream.dev_id)) {
2790 /* The other node has already been deleted */
2791 gb_clear(hnode_mgr->pipe_done_map, stream.dev_id);
2792 gb_clear(hnode_mgr->pipe_map, stream.dev_id);
2794 /* The other node has not been deleted yet */
2795 gb_set(hnode_mgr->pipe_done_map, stream.dev_id);
2797 } else if (stream.type == HOSTCONNECT) {
2798 if (stream.dev_id < hnode_mgr->ul_num_chnls) {
2799 gb_clear(hnode_mgr->chnl_map, stream.dev_id);
2800 } else if (stream.dev_id < (2 * hnode_mgr->ul_num_chnls)) {
/* Second range: DMA channels; rebase the id to the bitmap. */
2802 gb_clear(hnode_mgr->dma_chnl_map, stream.dev_id -
2803 (1 * hnode_mgr->ul_num_chnls));
2804 } else if (stream.dev_id < (3 * hnode_mgr->ul_num_chnls)) {
/* Third range: zero-copy channels. */
2806 gb_clear(hnode_mgr->zc_chnl_map, stream.dev_id -
2807 (2 * hnode_mgr->ul_num_chnls));
2813 * ======== get_fxn_address ========
2815 * Retrieves the address for create, execute or delete phase for a node.
/*
 * Looks up the DSP-side entry-point address of one of the node's three
 * phase functions.  The symbol name comes from the node's DCD properties
 * (pstr_*_phase_fxn) and is resolved through the node loader's
 * pfn_get_fxn_addr(), which stores the result in *fxn_addr.
 * NOTE(review): the switch on the phase argument (and the argument's own
 * declaration) is missing from this excerpt - only the three per-phase
 * name assignments and the final resolver call are visible.
 */
2817 static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
2820 char *pstr_fxn_name = NULL;
2821 struct node_mgr *hnode_mgr = hnode->hnode_mgr;
2823 DBC_REQUIRE(node_get_type(hnode) == NODE_TASK ||
2824 node_get_type(hnode) == NODE_DAISSOCKET ||
2825 node_get_type(hnode) == NODE_MESSAGE);
2830 hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn;
2834 hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn;
2838 hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn;
2841 /* Should never get here */
2847 hnode_mgr->nldr_fxns.pfn_get_fxn_addr(hnode->nldr_node_obj,
2848 pstr_fxn_name, fxn_addr);
2854 * ======== get_node_info ========
2856 * Retrieves the node information.
/*
 * Copies a snapshot of the node's state into *node_info: NDB properties,
 * current priority, device owner, stream count, DSP environment handle,
 * execution state, and the full stream-connection table.  Caller supplies
 * storage; sc_stream_connection must have room for num_inputs+num_outputs
 * entries.
 */
2858 void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
2863 DBC_REQUIRE(node_info != NULL);
2865 node_info->cb_struct = sizeof(struct dsp_nodeinfo);
2866 node_info->nb_node_database_props =
2867 hnode->dcd_props.obj_data.node_obj.ndb_props;
2868 node_info->execution_priority = hnode->prio;
2869 node_info->device_owner = hnode->device_owner;
2870 node_info->number_streams = hnode->num_inputs + hnode->num_outputs;
2871 node_info->node_env = hnode->node_env;
2873 node_info->ns_execution_state = node_get_state(hnode);
2875 /* Copy stream connect data */
2876 for (i = 0; i < hnode->num_inputs + hnode->num_outputs; i++)
2877 node_info->sc_stream_connection[i] = hnode->stream_connect[i];
2882 * ======== get_node_props ========
2884 * Retrieve node properties.
/*
 * Loads the node's generic object definition from the DCD (configuration
 * database) into *dcd_prop and copies selected fields into hnode:
 *  - node type (hnode->ntype)
 *  - message-args (segment id, notify type, max message depth) for every
 *    type except NODE_DEVICE
 *  - device name (allocated copy in hnode->pstr_dev_name; freed in
 *    delete_node) - presumably only for NODE_DEVICE, the surrounding
 *    branch structure is not fully visible here
 *  - task args (priority, stack sizes, stack segment) for NODE_TASK and
 *    NODE_DAISSOCKET
 * Returns the dcd_get_object_def() status; allocation-failure handling is
 * outside this excerpt.
 */
2886 static int get_node_props(struct dcd_manager *hdcd_mgr,
2887 struct node_object *hnode,
2888 const struct dsp_uuid *node_uuid,
2889 struct dcd_genericobj *dcd_prop)
2892 struct node_msgargs *pmsg_args;
2893 struct node_taskargs *task_arg_obj;
2894 enum node_type node_type = NODE_TASK;
2895 struct dsp_ndbprops *pndb_props =
2896 &(dcd_prop->obj_data.node_obj.ndb_props);
2898 char sz_uuid[MAXUUIDLEN];
2900 status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)node_uuid,
2901 DSP_DCDNODETYPE, dcd_prop);
2904 hnode->ntype = node_type = pndb_props->ntype;
2906 /* Create UUID value to set in registry. */
2907 uuid_uuid_to_string((struct dsp_uuid *)node_uuid, sz_uuid,
2909 dev_dbg(bridge, "(node) UUID: %s\n", sz_uuid);
2911 /* Fill in message args that come from NDB */
2912 if (node_type != NODE_DEVICE) {
2913 pmsg_args = &(hnode->create_args.asa.node_msg_args);
/* NOTE(review): the "pmsg_args->seg_id =" left-hand side appears
 * to be missing before the next line in this excerpt. */
2915 dcd_prop->obj_data.node_obj.msg_segid;
2916 pmsg_args->notify_type =
2917 dcd_prop->obj_data.node_obj.msg_notify_type;
2918 pmsg_args->max_msgs = pndb_props->message_depth;
2919 dev_dbg(bridge, "(node) Max Number of Messages: 0x%x\n",
2920 pmsg_args->max_msgs);
2922 /* Copy device name */
2923 DBC_REQUIRE(pndb_props->ac_name);
2924 len = strlen(pndb_props->ac_name);
2925 DBC_ASSERT(len < MAXDEVNAMELEN);
/* +1 for NUL; kzalloc guarantees termination after strncpy of len. */
2926 hnode->pstr_dev_name = kzalloc(len + 1, GFP_KERNEL);
2927 if (hnode->pstr_dev_name == NULL) {
2930 strncpy(hnode->pstr_dev_name,
2931 pndb_props->ac_name, len);
2936 /* Fill in create args that come from NDB */
2937 if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
2938 task_arg_obj = &(hnode->create_args.asa.task_arg_obj);
2939 task_arg_obj->prio = pndb_props->prio;
2940 task_arg_obj->stack_size = pndb_props->stack_size;
2941 task_arg_obj->sys_stack_size =
2942 pndb_props->sys_stack_size;
2943 task_arg_obj->stack_seg = pndb_props->stack_seg;
2944 dev_dbg(bridge, "(node) Priority: 0x%x Stack Size: "
2945 "0x%x words System Stack Size: 0x%x words "
2946 "Stack Segment: 0x%x profile count : 0x%x\n",
2947 task_arg_obj->prio, task_arg_obj->stack_size,
2948 task_arg_obj->sys_stack_size,
2949 task_arg_obj->stack_seg,
2950 pndb_props->count_profiles);
2958 * ======== get_proc_props ========
2960 * Retrieve the processor properties.
/*
 * Fills processor-related fields of the node manager from the bridge
 * device context's host resources (channel offset/buffer-size/count) and
 * from hard-coded defaults (family/type, priority range, word and MAU
 * sizes).  The TODO below notes these defaults should eventually come
 * from the registry / a PROC API.
 * NOTE(review): the error paths for a NULL bridge context / resources are
 * not visible in this excerpt.
 */
2962 static int get_proc_props(struct node_mgr *hnode_mgr,
2963 struct dev_object *hdev_obj)
2965 struct cfg_hostres *host_res;
2966 struct bridge_dev_context *pbridge_context;
2969 status = dev_get_bridge_context(hdev_obj, &pbridge_context);
2970 if (!pbridge_context)
2974 host_res = pbridge_context->resources;
2977 hnode_mgr->ul_chnl_offset = host_res->dw_chnl_offset;
2978 hnode_mgr->ul_chnl_buf_size = host_res->dw_chnl_buf_size;
2979 hnode_mgr->ul_num_chnls = host_res->dw_num_chnls;
2982 * PROC will add an API to get dsp_processorinfo.
2983 * Fill in default values for now.
2985 /* TODO -- Instead of hard coding, take from registry */
2986 hnode_mgr->proc_family = 6000;
2987 hnode_mgr->proc_type = 6410;
2988 hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY;
2989 hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY;
2990 hnode_mgr->udsp_word_size = DSPWORDSIZE;
2991 hnode_mgr->udsp_data_mau_size = DSPWORDSIZE;
2992 hnode_mgr->udsp_mau_size = 1;
2999 * ======== node_get_uuid_props ========
3001 * Fetch Node UUID properties from DCD/DOF file.
/*
 * Public API: looks up the NDB properties of a node identified by UUID on
 * the given processor.  Validates the handles, refuses to proceed if the
 * processor is in PROC_ERROR state, then queries the DCD under the node
 * manager lock (dcd_get_object_def ends up in dbll_open/close, which must
 * be serialized - see the comment below).  On success *node_props gets
 * the ndb_props and the four transient phase-function name strings that
 * dcd_get_object_def allocated are freed.
 * NOTE(review): the early-return/goto error paths between checks are not
 * visible in this excerpt.
 */
3003 int node_get_uuid_props(void *hprocessor,
3004 const struct dsp_uuid *node_uuid,
3005 struct dsp_ndbprops *node_props)
3007 struct node_mgr *hnode_mgr = NULL;
3008 struct dev_object *hdev_obj;
3010 struct dcd_nodeprops dcd_node_props;
3011 struct dsp_processorstate proc_state;
3013 DBC_REQUIRE(refs > 0);
3014 DBC_REQUIRE(hprocessor != NULL);
3015 DBC_REQUIRE(node_uuid != NULL);
3017 if (hprocessor == NULL || node_uuid == NULL) {
3021 status = proc_get_state(hprocessor, &proc_state,
3022 sizeof(struct dsp_processorstate));
3025 /* If processor is in error state then don't attempt
3026 to send the message */
3027 if (proc_state.proc_state == PROC_ERROR) {
3032 status = proc_get_dev_object(hprocessor, &hdev_obj);
3034 status = dev_get_node_manager(hdev_obj, &hnode_mgr);
3035 if (hnode_mgr == NULL) {
3042 * Enter the critical section. This is needed because
3043 * dcd_get_object_def will ultimately end up calling dbll_open/close,
3044 * which needs to be protected in order to not corrupt the zlib manager
3047 mutex_lock(&hnode_mgr->node_mgr_lock);
/* Pre-NULL the string fields so the unconditional kfree()s below
 * are safe even if dcd_get_object_def fails part-way. */
3049 dcd_node_props.pstr_create_phase_fxn = NULL;
3050 dcd_node_props.pstr_execute_phase_fxn = NULL;
3051 dcd_node_props.pstr_delete_phase_fxn = NULL;
3052 dcd_node_props.pstr_i_alg_name = NULL;
3054 status = dcd_get_object_def(hnode_mgr->hdcd_mgr,
3055 (struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
3056 (struct dcd_genericobj *)&dcd_node_props);
3059 *node_props = dcd_node_props.ndb_props;
3060 kfree(dcd_node_props.pstr_create_phase_fxn);
3062 kfree(dcd_node_props.pstr_execute_phase_fxn);
3064 kfree(dcd_node_props.pstr_delete_phase_fxn);
3066 kfree(dcd_node_props.pstr_i_alg_name);
3068 /* Leave the critical section, we're done. */
3069 mutex_unlock(&hnode_mgr->node_mgr_lock);
3075 * ======== get_rms_fxns ========
3077 * Retrieve the RMS functions.
/*
 * Resolves the DSP addresses of the NUMRMSFXNS Resource-Manager-Server
 * entry points by symbol name and stores them in
 * hnode_mgr->ul_fxn_addrs[], indexed by the RMS* enum noted next to each
 * name.  A -ESPIPE lookup failure is logged as "currently not loaded"
 * (dynamic loading anticipated); other failures log the status.
 * NOTE(review): the loop's status check / early-exit scaffolding is not
 * visible in this excerpt.
 */
3079 static int get_rms_fxns(struct node_mgr *hnode_mgr)
3082 struct dev_object *dev_obj = hnode_mgr->hdev_obj;
/* Order must match the RMS* index enum used for ul_fxn_addrs[]. */
3085 static char *psz_fxns[NUMRMSFXNS] = {
3086 "RMS_queryServer", /* RMSQUERYSERVER */
3087 "RMS_configureServer", /* RMSCONFIGURESERVER */
3088 "RMS_createNode", /* RMSCREATENODE */
3089 "RMS_executeNode", /* RMSEXECUTENODE */
3090 "RMS_deleteNode", /* RMSDELETENODE */
3091 "RMS_changeNodePriority", /* RMSCHANGENODEPRIORITY */
3092 "RMS_readMemory", /* RMSREADMEMORY */
3093 "RMS_writeMemory", /* RMSWRITEMEMORY */
3094 "RMS_copy", /* RMSCOPY */
3097 for (i = 0; i < NUMRMSFXNS; i++) {
3098 status = dev_get_symbol(dev_obj, psz_fxns[i],
3099 &(hnode_mgr->ul_fxn_addrs[i]));
3101 if (status == -ESPIPE) {
3103 * May be loaded dynamically (in the future),
3104 * but return an error for now.
3106 dev_dbg(bridge, "%s: RMS function: %s currently"
3107 " not loaded\n", __func__, psz_fxns[i]);
3109 dev_dbg(bridge, "%s: Symbol not found: %s "
3110 "status = 0x%x\n", __func__,
3111 psz_fxns[i], status);
3121 * ======== ovly ========
3123 * Called during overlay.Sends command to RMS to copy a block of data.
/*
 * Overlay callback invoked by the node loader: copies ul_num_bytes of DSP
 * memory from dsp_load_addr to dsp_run_addr via the bridge driver's
 * pfn_brd_mem_copy.  priv_ref is the node whose manager supplies the
 * driver interface.  Returns the number of bytes copied (ul_bytes), 0 on
 * failure - the declarations of ul_bytes/ul_size/ul_timeout/status and
 * the final return are outside this excerpt.
 */
3125 static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
3126 u32 ul_num_bytes, u32 mem_space)
3128 struct node_object *hnode = (struct node_object *)priv_ref;
3129 struct node_mgr *hnode_mgr;
3134 struct bridge_dev_context *hbridge_context;
3135 /* Function interface to Bridge driver*/
3136 struct bridge_drv_interface *intf_fxns;
3140 hnode_mgr = hnode->hnode_mgr;
/* Size in DSP words; computed but the consumer is not visible here. */
3142 ul_size = ul_num_bytes / hnode_mgr->udsp_word_size;
3143 ul_timeout = hnode->utimeout;
3145 /* Call new MemCopy function */
3146 intf_fxns = hnode_mgr->intf_fxns;
3147 status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
3150 (*intf_fxns->pfn_brd_mem_copy) (hbridge_context,
3151 dsp_run_addr, dsp_load_addr,
3152 ul_num_bytes, (u32) mem_space);
3154 ul_bytes = ul_num_bytes;
3156 pr_debug("%s: failed to copy brd memory, status 0x%x\n",
3159 pr_debug("%s: failed to get Bridge context, status 0x%x\n",
3167 * ======== mem_write ========
/*
 * Loader write callback: writes ul_num_bytes from pbuf to DSP address
 * dsp_add through the bridge driver's pfn_brd_mem_write.  mem_space must
 * carry DBLL_CODE or DBLL_DATA (asserted) and selects RMS_CODE vs
 * RMS_DATA as the target section type.  Always returns ul_num_bytes;
 * NOTE(review): both dev_get_bridge_context() and pfn_brd_mem_write()
 * statuses are overwritten/ignored - the local declarations are outside
 * this excerpt.
 */
3169 static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
3170 u32 ul_num_bytes, u32 mem_space)
3172 struct node_object *hnode = (struct node_object *)priv_ref;
3173 struct node_mgr *hnode_mgr;
3177 struct bridge_dev_context *hbridge_context;
3178 /* Function interface to Bridge driver */
3179 struct bridge_drv_interface *intf_fxns;
3182 DBC_REQUIRE(mem_space & DBLL_CODE || mem_space & DBLL_DATA);
3184 hnode_mgr = hnode->hnode_mgr;
3186 ul_timeout = hnode->utimeout;
3187 mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA;
3189 /* Call new MemWrite function */
3190 intf_fxns = hnode_mgr->intf_fxns;
3191 status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
3192 status = (*intf_fxns->pfn_brd_mem_write) (hbridge_context, pbuf,
3193 dsp_add, ul_num_bytes, mem_sect_type);
3195 return ul_num_bytes;
3198 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
3200 * ======== node_find_addr ========
3202 int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
3203 u32 offset_range, void *sym_addr_output, char *sym_name)
3205 struct node_object *node_obj;
3206 int status = -ENOENT;
3209 pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__,
3210 (unsigned int) node_mgr,
3211 sym_addr, offset_range,
3212 (unsigned int) sym_addr_output, sym_name);
3214 node_obj = (struct node_object *)(node_mgr->node_list->head.next);
3216 for (n = 0; n < node_mgr->num_nodes; n++) {
3217 status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr,
3218 offset_range, sym_addr_output, sym_name);
3223 node_obj = (struct node_object *) (node_obj->list_elem.next);