]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/staging/tidspbridge/rmgr/proc.c
staging: tidspbridge - rename bridge_brd_mem_map/unmap to a proper name
[net-next-2.6.git] / drivers / staging / tidspbridge / rmgr / proc.c
CommitLineData
7d55524d
ORL
1/*
2 * proc.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Processor interface at the driver level.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
2094f12d 19#include <linux/types.h>
7d55524d
ORL
20/* ------------------------------------ Host OS */
21#include <linux/dma-mapping.h>
22#include <linux/scatterlist.h>
23#include <dspbridge/host_os.h>
24
25/* ----------------------------------- DSP/BIOS Bridge */
7d55524d
ORL
26#include <dspbridge/dbdefs.h>
27
28/* ----------------------------------- Trace & Debug */
29#include <dspbridge/dbc.h>
30
31/* ----------------------------------- OS Adaptation Layer */
7d55524d
ORL
32#include <dspbridge/list.h>
33#include <dspbridge/ntfy.h>
34#include <dspbridge/sync.h>
35/* ----------------------------------- Bridge Driver */
36#include <dspbridge/dspdefs.h>
37#include <dspbridge/dspdeh.h>
38/* ----------------------------------- Platform Manager */
39#include <dspbridge/cod.h>
40#include <dspbridge/dev.h>
41#include <dspbridge/procpriv.h>
42#include <dspbridge/dmm.h>
43
44/* ----------------------------------- Resource Manager */
45#include <dspbridge/mgr.h>
46#include <dspbridge/node.h>
47#include <dspbridge/nldr.h>
48#include <dspbridge/rmm.h>
49
50/* ----------------------------------- Others */
51#include <dspbridge/dbdcd.h>
52#include <dspbridge/msg.h>
53#include <dspbridge/dspioctl.h>
54#include <dspbridge/drv.h>
4dd1944a 55#include <_tiomap.h>
7d55524d
ORL
56
57/* ----------------------------------- This */
58#include <dspbridge/proc.h>
59#include <dspbridge/pwr.h>
60
61#include <dspbridge/resourcecleanup.h>
62/* ----------------------------------- Defines, Data Structures, Typedefs */
63#define MAXCMDLINELEN 255
64#define PROC_ENVPROCID "PROC_ID=%d"
65#define MAXPROCIDLEN (8 + 5)
66#define PROC_DFLT_TIMEOUT 10000 /* Time out in milliseconds */
67#define PWR_TIMEOUT 500 /* Sleep/wake timout in msec */
68#define EXTEND "_EXT_END" /* Extmem end addr in DSP binary */
69
70#define DSP_CACHE_LINE 128
71
72#define BUFMODE_MASK (3 << 14)
73
74/* Buffer modes from DSP perspective */
75#define RBUF 0x4000 /* Input buffer */
76#define WBUF 0x8000 /* Output Buffer */
77
78extern struct device *bridge;
79
80/* ----------------------------------- Globals */
81
82/* The proc_object structure. */
83struct proc_object {
84 struct list_head link; /* Link to next proc_object */
85 struct dev_object *hdev_obj; /* Device this PROC represents */
86 u32 process; /* Process owning this Processor */
87 struct mgr_object *hmgr_obj; /* Manager Object Handle */
88 u32 attach_count; /* Processor attach count */
89 u32 processor_id; /* Processor number */
90 u32 utimeout; /* Time out count */
91 enum dsp_procstate proc_state; /* Processor state */
92 u32 ul_unit; /* DDSP unit number */
93 bool is_already_attached; /*
94 * True if the Device below has
95 * GPP Client attached
96 */
97 struct ntfy_object *ntfy_obj; /* Manages notifications */
98 /* Bridge Context Handle */
99 struct bridge_dev_context *hbridge_context;
100 /* Function interface to Bridge driver */
101 struct bridge_drv_interface *intf_fxns;
102 char *psz_last_coff;
103 struct list_head proc_list;
104};
105
106static u32 refs;
107
108DEFINE_MUTEX(proc_lock); /* For critical sections */
109
110/* ----------------------------------- Function Prototypes */
c8c1ad8c 111static int proc_monitor(struct proc_object *proc_obj);
7d55524d
ORL
112static s32 get_envp_count(char **envp);
113static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
0cd343a4 114 s32 cnew_envp, char *sz_var);
7d55524d
ORL
115
116/* remember mapping information */
117static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
118 u32 mpu_addr, u32 dsp_addr, u32 size)
119{
120 struct dmm_map_object *map_obj;
121
122 u32 num_usr_pgs = size / PG_SIZE4K;
123
124 pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n",
125 __func__, mpu_addr,
126 dsp_addr, size);
127
128 map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL);
129 if (!map_obj) {
130 pr_err("%s: kzalloc failed\n", __func__);
131 return NULL;
132 }
133 INIT_LIST_HEAD(&map_obj->link);
134
135 map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *),
136 GFP_KERNEL);
137 if (!map_obj->pages) {
138 pr_err("%s: kzalloc failed\n", __func__);
139 kfree(map_obj);
140 return NULL;
141 }
142
143 map_obj->mpu_addr = mpu_addr;
144 map_obj->dsp_addr = dsp_addr;
145 map_obj->size = size;
146 map_obj->num_usr_pgs = num_usr_pgs;
147
148 spin_lock(&pr_ctxt->dmm_map_lock);
149 list_add(&map_obj->link, &pr_ctxt->dmm_map_list);
150 spin_unlock(&pr_ctxt->dmm_map_lock);
151
152 return map_obj;
153}
154
155static int match_exact_map_obj(struct dmm_map_object *map_obj,
156 u32 dsp_addr, u32 size)
157{
158 if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
159 pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n",
160 __func__, dsp_addr, map_obj->size, size);
161
162 return map_obj->dsp_addr == dsp_addr &&
163 map_obj->size == size;
164}
165
/*
 * remove_mapping_information() - unlink and free the cached map entry that
 * matches (@dsp_addr, @size) exactly in @pr_ctxt's DMM map list.
 *
 * Frees the entry's DMA scatterlist, its page-pointer array and the entry
 * itself.  Logs an error if no exact match exists.  Takes
 * pr_ctxt->dmm_map_lock internally.
 */
static void remove_mapping_information(struct process_context *pr_ctxt,
						u32 dsp_addr, u32 size)
{
	struct dmm_map_object *map_obj;

	pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
							dsp_addr, size);

	spin_lock(&pr_ctxt->dmm_map_lock);
	list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
		pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
							__func__,
							map_obj->mpu_addr,
							map_obj->dsp_addr,
							map_obj->size);

		if (match_exact_map_obj(map_obj, dsp_addr, size)) {
			pr_debug("%s: match, deleting map info\n", __func__);
			/* Deleting inside list_for_each_entry() is safe here
			 * because we jump out right after unlinking. */
			list_del(&map_obj->link);
			kfree(map_obj->dma_info.sg);
			kfree(map_obj->pages);
			kfree(map_obj);
			goto out;
		}
		pr_debug("%s: candidate didn't match\n", __func__);
	}

	pr_err("%s: failed to find given map info\n", __func__);
out:
	spin_unlock(&pr_ctxt->dmm_map_lock);
}
197
198static int match_containing_map_obj(struct dmm_map_object *map_obj,
199 u32 mpu_addr, u32 size)
200{
201 u32 map_obj_end = map_obj->mpu_addr + map_obj->size;
202
203 return mpu_addr >= map_obj->mpu_addr &&
204 mpu_addr + size <= map_obj_end;
205}
206
/*
 * find_containing_mapping() - find the cached DMM map entry whose MPU range
 * fully contains [@mpu_addr, @mpu_addr + @size).
 *
 * Returns the matching entry or NULL.  Takes pr_ctxt->dmm_map_lock while
 * scanning.
 *
 * NOTE(review): the pointer is returned after the lock is dropped, so the
 * entry could in principle be freed by a concurrent
 * remove_mapping_information() -- presumably callers serialize map/unmap
 * against DMA begin/end; confirm against the callers.
 */
static struct dmm_map_object *find_containing_mapping(
				struct process_context *pr_ctxt,
				u32 mpu_addr, u32 size)
{
	struct dmm_map_object *map_obj;
	pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__,
						mpu_addr, size);

	spin_lock(&pr_ctxt->dmm_map_lock);
	list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
		pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
						__func__,
						map_obj->mpu_addr,
						map_obj->dsp_addr,
						map_obj->size);
		if (match_containing_map_obj(map_obj, mpu_addr, size)) {
			pr_debug("%s: match!\n", __func__);
			goto out;
		}

		pr_debug("%s: no match!\n", __func__);
	}

	map_obj = NULL;
out:
	spin_unlock(&pr_ctxt->dmm_map_lock);
	return map_obj;
}
235
236static int find_first_page_in_cache(struct dmm_map_object *map_obj,
237 unsigned long mpu_addr)
238{
239 u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT;
240 u32 requested_base_page = mpu_addr >> PAGE_SHIFT;
241 int pg_index = requested_base_page - mapped_base_page;
242
243 if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) {
244 pr_err("%s: failed (got %d)\n", __func__, pg_index);
245 return -1;
246 }
247
248 pr_debug("%s: first page is %d\n", __func__, pg_index);
249 return pg_index;
250}
251
252static inline struct page *get_mapping_page(struct dmm_map_object *map_obj,
253 int pg_i)
254{
255 pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__,
256 pg_i, map_obj->num_usr_pgs);
257
258 if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) {
259 pr_err("%s: requested pg_i %d is out of mapped range\n",
260 __func__, pg_i);
261 return NULL;
262 }
263
264 return map_obj->pages[pg_i];
265}
266
/*
 * ======== proc_attach ========
 * Purpose:
 *      Prepare for communication with a particular DSP processor, and return
 *      a handle to the processor object.
 *
 * If the process context already holds a processor handle it is returned
 * as-is.  Otherwise a proc_object is allocated, wired to the device,
 * manager and bridge-context handles, given a notification object, and
 * inserted into the DEV object's processor list.  On success the handle is
 * stored in both *ph_processor and pr_ctxt->hprocessor.
 */
int
proc_attach(u32 processor_id,
	    const struct dsp_processorattrin *attr_in,
	    void **ph_processor, struct process_context *pr_ctxt)
{
	int status = 0;
	struct dev_object *hdev_obj;
	struct proc_object *p_proc_object = NULL;
	struct mgr_object *hmgr_obj = NULL;
	struct drv_object *hdrv_obj = NULL;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	u8 dev_type;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_processor != NULL);

	/* Already attached in this process context: hand back same handle. */
	if (pr_ctxt->hprocessor) {
		*ph_processor = pr_ctxt->hprocessor;
		return status;
	}

	/* Get the Driver and Manager Object Handles */
	if (!drv_datap || !drv_datap->drv_object || !drv_datap->mgr_object) {
		status = -ENODATA;
		pr_err("%s: Failed to get object handles\n", __func__);
	} else {
		hdrv_obj = drv_datap->drv_object;
		hmgr_obj = drv_datap->mgr_object;
	}

	if (!status) {
		/* Get the Device Object */
		status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj);
	}
	if (!status)
		status = dev_get_dev_type(hdev_obj, &dev_type);

	/* NOTE(review): on this failure path *ph_processor is never
	 * assigned, yet DBC_ENSURE at func_end may evaluate it --
	 * confirm callers initialize it. */
	if (status)
		goto func_end;

	/* If we made it this far, create the Processor object: */
	p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
	/* Fill out the Processor Object: */
	if (p_proc_object == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	p_proc_object->hdev_obj = hdev_obj;
	p_proc_object->hmgr_obj = hmgr_obj;
	p_proc_object->processor_id = dev_type;
	/* Store TGID instead of process handle */
	p_proc_object->process = current->tgid;

	INIT_LIST_HEAD(&p_proc_object->proc_list);

	if (attr_in)
		p_proc_object->utimeout = attr_in->utimeout;
	else
		p_proc_object->utimeout = PROC_DFLT_TIMEOUT;

	status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
	if (!status) {
		status = dev_get_bridge_context(hdev_obj,
					     &p_proc_object->hbridge_context);
		/* NOTE(review): p_proc_object is freed on these failure
		 * paths but the pointer is not NULLed; the later
		 * DBC_ENSURE still reads the dangling pointer value
		 * (comparison only, no dereference). */
		if (status)
			kfree(p_proc_object);
	} else
		kfree(p_proc_object);

	if (status)
		goto func_end;

	/* Create the Notification Object */
	/* This is created with no event mask, no notify mask
	 * and no valid handle to the notification. They all get
	 * filled up when proc_register_notify is called */
	p_proc_object->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
					  GFP_KERNEL);
	if (p_proc_object->ntfy_obj)
		ntfy_init(p_proc_object->ntfy_obj);
	else
		status = -ENOMEM;

	if (!status) {
		/* Insert the Processor Object into the DEV List.
		 * Return handle to this Processor Object:
		 * Find out if the Device is already attached to a
		 * Processor. If so, return AlreadyAttached status */
		lst_init_elem(&p_proc_object->link);
		status = dev_insert_proc_object(p_proc_object->hdev_obj,
						(u32) p_proc_object,
						&p_proc_object->
						is_already_attached);
		if (!status) {
			if (p_proc_object->is_already_attached)
				status = 0;
		} else {
			if (p_proc_object->ntfy_obj) {
				ntfy_delete(p_proc_object->ntfy_obj);
				kfree(p_proc_object->ntfy_obj);
			}

			kfree(p_proc_object);
		}
		if (!status) {
			/* Publish the handle and notify registered clients. */
			*ph_processor = (void *)p_proc_object;
			pr_ctxt->hprocessor = *ph_processor;
			(void)proc_notify_clients(p_proc_object,
						  DSP_PROCESSORATTACH);
		}
	} else {
		/* Don't leak memory if status is failed */
		kfree(p_proc_object);
	}
func_end:
	DBC_ENSURE((status == -EPERM && *ph_processor == NULL) ||
		   (!status && p_proc_object) ||
		   (status == 0 && p_proc_object));

	return status;
}
394
395static int get_exec_file(struct cfg_devnode *dev_node_obj,
396 struct dev_object *hdev_obj,
b301c858 397 u32 size, char *exec_file)
7d55524d
ORL
398{
399 u8 dev_type;
400 s32 len;
315a1a20 401 struct drv_data *drv_datap = dev_get_drvdata(bridge);
7d55524d
ORL
402
403 dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
315a1a20
IGC
404
405 if (!exec_file)
406 return -EFAULT;
407
7d55524d 408 if (dev_type == DSP_UNIT) {
315a1a20
IGC
409 if (!drv_datap || !drv_datap->base_img)
410 return -EFAULT;
411
412 if (strlen(drv_datap->base_img) > size)
413 return -EINVAL;
414
415 strcpy(exec_file, drv_datap->base_img);
416 } else if (dev_type == IVA_UNIT && iva_img) {
417 len = strlen(iva_img);
418 strncpy(exec_file, iva_img, len + 1);
419 } else {
420 return -ENOENT;
7d55524d 421 }
315a1a20
IGC
422
423 return 0;
7d55524d
ORL
424}
425
/*
 * ======== proc_auto_start ======== =
 * Purpose:
 *      A Particular device gets loaded with the default image
 *      if the AutoStart flag is set.
 * Parameters:
 *      hdev_obj:     Handle to the Device
 * Returns:
 *      0:   On Successful Loading
 *      -EPERM  General Failure
 * Requires:
 *      hdev_obj != NULL
 * Ensures:
 *
 * Implementation note: a throwaway proc_object is built on the fly (it is
 * never inserted into the DEV list) purely to drive proc_stop/proc_load/
 * proc_start, and is freed at func_cont regardless of outcome.
 */
int proc_auto_start(struct cfg_devnode *dev_node_obj,
		    struct dev_object *hdev_obj)
{
	int status = -EPERM;
	struct proc_object *p_proc_object;
	char sz_exec_file[MAXCMDLINELEN];
	char *argv[2];
	struct mgr_object *hmgr_obj = NULL;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	u8 dev_type;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(dev_node_obj != NULL);
	DBC_REQUIRE(hdev_obj != NULL);

	/* Create a Dummy PROC Object */
	if (!drv_datap || !drv_datap->mgr_object) {
		status = -ENODATA;
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
		goto func_end;
	} else {
		hmgr_obj = drv_datap->mgr_object;
	}

	p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
	if (p_proc_object == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	p_proc_object->hdev_obj = hdev_obj;
	p_proc_object->hmgr_obj = hmgr_obj;
	status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
	if (!status)
		status = dev_get_bridge_context(hdev_obj,
					     &p_proc_object->hbridge_context);
	if (status)
		goto func_cont;

	/* Stop the Device, put it into standby mode */
	status = proc_stop(p_proc_object);

	if (status)
		goto func_cont;

	/* Get the default executable for this board... */
	dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
	p_proc_object->processor_id = dev_type;
	status = get_exec_file(dev_node_obj, hdev_obj, sizeof(sz_exec_file),
			       sz_exec_file);
	if (!status) {
		argv[0] = sz_exec_file;
		argv[1] = NULL;
		/* ...and try to load it: */
		status = proc_load(p_proc_object, 1, (const char **)argv, NULL);
		if (!status)
			status = proc_start(p_proc_object);
	}
	/* Drop any COFF path proc_load may have cached on the dummy object. */
	kfree(p_proc_object->psz_last_coff);
	p_proc_object->psz_last_coff = NULL;
func_cont:
	kfree(p_proc_object);
func_end:
	return status;
}
504
/*
 * ======== proc_ctrl ========
 * Purpose:
 *      Pass control information to the GPP device driver managing the
 *      DSP processor.
 *
 *      This will be an OEM-only function, and not part of the DSP/BIOS Bridge
 *      application developer's API.
 *      Call the bridge_dev_ctrl fxn with the Argument. This is a Synchronous
 *      Operation. arg can be null.
 *
 * Sleep/wake commands are intercepted and routed to the PWR layer; any
 * other command is forwarded verbatim to the bridge driver's dev_cntrl op.
 */
int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata * arg)
{
	int status = 0;
	struct proc_object *p_proc_object = hprocessor;
	u32 timeout = 0;

	DBC_REQUIRE(refs > 0);

	if (p_proc_object) {
		/* intercept PWR deep sleep command */
		/* NOTE(review): the header says arg can be null, but the
		 * BRDIOCTL_* branches below read arg->cb_data without a
		 * NULL check -- confirm callers always supply arg for
		 * these commands. */
		if (dw_cmd == BRDIOCTL_DEEPSLEEP) {
			timeout = arg->cb_data;
			status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
		}
		/* intercept PWR emergency sleep command */
		else if (dw_cmd == BRDIOCTL_EMERGENCYSLEEP) {
			timeout = arg->cb_data;
			status = pwr_sleep_dsp(PWR_EMERGENCYDEEPSLEEP, timeout);
		} else if (dw_cmd == PWR_DEEPSLEEP) {
			/* timeout = arg->cb_data; */
			status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
		}
		/* intercept PWR wake commands */
		else if (dw_cmd == BRDIOCTL_WAKEUP) {
			timeout = arg->cb_data;
			status = pwr_wake_dsp(timeout);
		} else if (dw_cmd == PWR_WAKEUP) {
			/* timeout = arg->cb_data; */
			status = pwr_wake_dsp(timeout);
		} else
		    if (!((*p_proc_object->intf_fxns->pfn_dev_cntrl)
				  (p_proc_object->hbridge_context, dw_cmd,
				   arg))) {
			status = 0;
		} else {
			status = -EPERM;
		}
	} else {
		status = -EFAULT;
	}

	return status;
}
559
560/*
561 * ======== proc_detach ========
562 * Purpose:
563 * Destroys the Processor Object. Removes the notification from the Dev
564 * List.
565 */
566int proc_detach(struct process_context *pr_ctxt)
567{
568 int status = 0;
569 struct proc_object *p_proc_object = NULL;
570
571 DBC_REQUIRE(refs > 0);
572
573 p_proc_object = (struct proc_object *)pr_ctxt->hprocessor;
574
575 if (p_proc_object) {
576 /* Notify the Client */
577 ntfy_notify(p_proc_object->ntfy_obj, DSP_PROCESSORDETACH);
578 /* Remove the notification memory */
579 if (p_proc_object->ntfy_obj) {
580 ntfy_delete(p_proc_object->ntfy_obj);
581 kfree(p_proc_object->ntfy_obj);
582 }
583
584 kfree(p_proc_object->psz_last_coff);
585 p_proc_object->psz_last_coff = NULL;
586 /* Remove the Proc from the DEV List */
587 (void)dev_remove_proc_object(p_proc_object->hdev_obj,
588 (u32) p_proc_object);
589 /* Free the Processor Object */
590 kfree(p_proc_object);
591 pr_ctxt->hprocessor = NULL;
592 } else {
593 status = -EFAULT;
594 }
595
596 return status;
597}
598
599/*
600 * ======== proc_enum_nodes ========
601 * Purpose:
602 * Enumerate and get configuration information about nodes allocated
603 * on a DSP processor.
604 */
605int proc_enum_nodes(void *hprocessor, void **node_tab,
e6bf74f0
MN
606 u32 node_tab_size, u32 *pu_num_nodes,
607 u32 *pu_allocated)
7d55524d
ORL
608{
609 int status = -EPERM;
610 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
611 struct node_mgr *hnode_mgr = NULL;
612
613 DBC_REQUIRE(refs > 0);
614 DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
615 DBC_REQUIRE(pu_num_nodes != NULL);
616 DBC_REQUIRE(pu_allocated != NULL);
617
618 if (p_proc_object) {
a741ea6e 619 if (!(dev_get_node_manager(p_proc_object->hdev_obj,
7d55524d
ORL
620 &hnode_mgr))) {
621 if (hnode_mgr) {
622 status = node_enum_nodes(hnode_mgr, node_tab,
623 node_tab_size,
624 pu_num_nodes,
625 pu_allocated);
626 }
627 }
628 } else {
629 status = -EFAULT;
630 }
631
632 return status;
633}
634
/* Cache operation against kernel address instead of users */
/*
 * build_dma_sg() - populate @map_obj's preallocated scatterlist with the
 * cached pages covering [@start, @start + @len), beginning at page index
 * @pg_i.  The first/last entries get partial-page offset/length.  Returns
 * 0 on success, -EINVAL/-EFAULT or a PTR_ERR value on failure.  Expects
 * map_obj->dma_info.num_pages to match the span exactly.
 */
static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start,
			ssize_t len, int pg_i)
{
	struct page *page;
	unsigned long offset;
	ssize_t rest;
	int ret = 0, i = 0;
	struct scatterlist *sg = map_obj->dma_info.sg;

	while (len) {
		page = get_mapping_page(map_obj, pg_i);
		if (!page) {
			pr_err("%s: no page for %08lx\n", __func__, start);
			ret = -EINVAL;
			goto out;
		} else if (IS_ERR(page)) {
			pr_err("%s: err page for %08lx(%lu)\n", __func__, start,
			       PTR_ERR(page));
			ret = PTR_ERR(page);
			goto out;
		}

		/* Clamp this entry to the remainder of the current page. */
		offset = start & ~PAGE_MASK;
		rest = min_t(ssize_t, PAGE_SIZE - offset, len);

		sg_set_page(&sg[i], page, rest, offset);

		len -= rest;
		start += rest;
		pg_i++, i++;
	}

	/* Sanity: the walk must have consumed exactly the sg entries the
	 * caller allocated for this range. */
	if (i != map_obj->dma_info.num_pages) {
		pr_err("%s: bad number of sg iterations\n", __func__);
		ret = -EFAULT;
		goto out;
	}

out:
	return ret;
}
677
/*
 * memory_regain_ownership() - end a DMA transfer: unmap @map_obj's
 * scatterlist from the device and free it, handing the buffer back to
 * the CPU.
 *
 * Returns 0 on success and also, silently, when no scatterlist exists
 * (no DMA was started); returns -EINVAL if the requested direction or
 * page count disagree with what memory_give_ownership() recorded.
 */
static int memory_regain_ownership(struct dmm_map_object *map_obj,
		unsigned long start, ssize_t len, enum dma_data_direction dir)
{
	int ret = 0;
	unsigned long first_data_page = start >> PAGE_SHIFT;
	unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
	/* calculating the number of pages this area spans */
	unsigned long num_pages = last_data_page - first_data_page + 1;
	struct bridge_dma_map_info *dma_info = &map_obj->dma_info;

	if (!dma_info->sg)
		goto out;

	if (dma_info->dir != dir || dma_info->num_pages != num_pages) {
		pr_err("%s: dma info doesn't match given params\n", __func__);
		return -EINVAL;
	}

	dma_unmap_sg(bridge, dma_info->sg, num_pages, dma_info->dir);

	pr_debug("%s: dma_map_sg unmapped\n", __func__);

	kfree(dma_info->sg);

	map_obj->dma_info.sg = NULL;

out:
	return ret;
}
707
/* Cache operation against kernel address instead of users */
/*
 * memory_give_ownership() - begin a DMA transfer: build a scatterlist over
 * the cached pages spanning [@start, @start + @len) and dma_map_sg() it
 * for direction @dir, handing the buffer to the device.
 *
 * The sg, direction and page count are recorded in map_obj->dma_info so
 * memory_regain_ownership() can validate and undo the mapping.  Any
 * leftover sg from a transfer the application never completed is freed
 * first.  Returns 0 on success or a negative errno.
 */
static int memory_give_ownership(struct dmm_map_object *map_obj,
		unsigned long start, ssize_t len, enum dma_data_direction dir)
{
	int pg_i, ret, sg_num;
	struct scatterlist *sg;
	unsigned long first_data_page = start >> PAGE_SHIFT;
	unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
	/* calculating the number of pages this area spans */
	unsigned long num_pages = last_data_page - first_data_page + 1;

	pg_i = find_first_page_in_cache(map_obj, start);
	if (pg_i < 0) {
		pr_err("%s: failed to find first page in cache\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		pr_err("%s: kcalloc failed\n", __func__);
		ret = -ENOMEM;
		goto out;
	}

	sg_init_table(sg, num_pages);

	/* cleanup a previous sg allocation */
	/* this may happen if application doesn't signal for e/o DMA */
	kfree(map_obj->dma_info.sg);

	map_obj->dma_info.sg = sg;
	map_obj->dma_info.dir = dir;
	map_obj->dma_info.num_pages = num_pages;

	ret = build_dma_sg(map_obj, start, len, pg_i);
	if (ret)
		goto kfree_sg;

	sg_num = dma_map_sg(bridge, sg, num_pages, dir);
	if (sg_num < 1) {
		pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num);
		ret = -EFAULT;
		goto kfree_sg;
	}

	pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num);
	map_obj->dma_info.sg_num = sg_num;

	return 0;

kfree_sg:
	kfree(sg);
	map_obj->dma_info.sg = NULL;
out:
	return ret;
}
765
766int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
767 enum dma_data_direction dir)
768{
769 /* Keep STATUS here for future additions to this function */
770 int status = 0;
771 struct process_context *pr_ctxt = (struct process_context *) hprocessor;
772 struct dmm_map_object *map_obj;
773
774 DBC_REQUIRE(refs > 0);
775
776 if (!pr_ctxt) {
777 status = -EFAULT;
778 goto err_out;
779 }
780
781 pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
782 (u32)pmpu_addr,
783 ul_size, dir);
784
785 /* find requested memory are in cached mapping information */
786 map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
787 if (!map_obj) {
788 pr_err("%s: find_containing_mapping failed\n", __func__);
789 status = -EFAULT;
790 goto err_out;
791 }
792
793 if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
794 pr_err("%s: InValid address parameters %p %x\n",
795 __func__, pmpu_addr, ul_size);
796 status = -EFAULT;
797 }
798
799err_out:
800
801 return status;
802}
803
804int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
805 enum dma_data_direction dir)
806{
807 /* Keep STATUS here for future additions to this function */
808 int status = 0;
809 struct process_context *pr_ctxt = (struct process_context *) hprocessor;
810 struct dmm_map_object *map_obj;
811
812 DBC_REQUIRE(refs > 0);
813
814 if (!pr_ctxt) {
815 status = -EFAULT;
816 goto err_out;
817 }
818
819 pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
820 (u32)pmpu_addr,
821 ul_size, dir);
822
823 /* find requested memory are in cached mapping information */
824 map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
825 if (!map_obj) {
826 pr_err("%s: find_containing_mapping failed\n", __func__);
827 status = -EFAULT;
828 goto err_out;
829 }
830
831 if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
832 pr_err("%s: InValid address parameters %p %x\n",
833 __func__, pmpu_addr, ul_size);
834 status = -EFAULT;
835 goto err_out;
836 }
837
838err_out:
839 return status;
840}
841
842/*
843 * ======== proc_flush_memory ========
844 * Purpose:
845 * Flush cache
846 */
847int proc_flush_memory(void *hprocessor, void *pmpu_addr,
848 u32 ul_size, u32 ul_flags)
849{
850 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
851
852 return proc_begin_dma(hprocessor, pmpu_addr, ul_size, dir);
853}
854
855/*
856 * ======== proc_invalidate_memory ========
857 * Purpose:
858 * Invalidates the memory specified
859 */
860int proc_invalidate_memory(void *hprocessor, void *pmpu_addr, u32 size)
861{
862 enum dma_data_direction dir = DMA_FROM_DEVICE;
863
864 return proc_begin_dma(hprocessor, pmpu_addr, size, dir);
865}
866
/*
 * ======== proc_get_resource_info ========
 * Purpose:
 *      Enumerate the resources currently available on a processor.
 *
 * For the dynamic-memory resource types the RMM manager is queried via
 * the node loader; for DSP_RESOURCE_PROCLOAD the IO manager reports the
 * processor load.  Results are written into resource_info->result.
 */
int proc_get_resource_info(void *hprocessor, u32 resource_type,
			   struct dsp_resourceinfo *resource_info,
			   u32 resource_info_size)
{
	int status = -EPERM;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct node_mgr *hnode_mgr = NULL;
	struct nldr_object *nldr_obj = NULL;
	struct rmm_target_obj *rmm = NULL;
	struct io_mgr *hio_mgr = NULL;	/* IO manager handle */

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(resource_info != NULL);
	DBC_REQUIRE(resource_info_size >= sizeof(struct dsp_resourceinfo));

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	switch (resource_type) {
	case DSP_RESOURCE_DYNDARAM:
	case DSP_RESOURCE_DYNSARAM:
	case DSP_RESOURCE_DYNEXTERNAL:
	case DSP_RESOURCE_DYNSRAM:
		status = dev_get_node_manager(p_proc_object->hdev_obj,
					      &hnode_mgr);
		if (!hnode_mgr) {
			status = -EFAULT;
			goto func_end;
		}

		status = node_get_nldr_obj(hnode_mgr, &nldr_obj);
		if (!status) {
			status = nldr_get_rmm_manager(nldr_obj, &rmm);
			if (rmm) {
				/* rmm_stat() returning false means the
				 * requested memory type has no stats. */
				if (!rmm_stat(rmm,
					      (enum dsp_memtype)resource_type,
					      (struct dsp_memstat *)
					      &(resource_info->result.
						mem_stat)))
					status = -EINVAL;
			} else {
				status = -EFAULT;
			}
		}
		break;
	case DSP_RESOURCE_PROCLOAD:
		status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr);
		if (hio_mgr)
			status =
			    p_proc_object->intf_fxns->
			    pfn_io_get_proc_load(hio_mgr,
						 (struct dsp_procloadstat *)
						 &(resource_info->result.
						   proc_load_stat));
		else
			status = -EFAULT;
		break;
	default:
		status = -EPERM;
		break;
	}
func_end:
	return status;
}
937
938/*
939 * ======== proc_exit ========
940 * Purpose:
941 * Decrement reference count, and free resources when reference count is
942 * 0.
943 */
944void proc_exit(void)
945{
946 DBC_REQUIRE(refs > 0);
947
948 refs--;
949
950 DBC_ENSURE(refs >= 0);
951}
952
953/*
954 * ======== proc_get_dev_object ========
955 * Purpose:
956 * Return the Dev Object handle for a given Processor.
957 *
958 */
959int proc_get_dev_object(void *hprocessor,
e436d07d 960 struct dev_object **device_obj)
7d55524d
ORL
961{
962 int status = -EPERM;
963 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
964
965 DBC_REQUIRE(refs > 0);
e436d07d 966 DBC_REQUIRE(device_obj != NULL);
7d55524d
ORL
967
968 if (p_proc_object) {
e436d07d 969 *device_obj = p_proc_object->hdev_obj;
7d55524d
ORL
970 status = 0;
971 } else {
e436d07d 972 *device_obj = NULL;
7d55524d
ORL
973 status = -EFAULT;
974 }
975
a741ea6e 976 DBC_ENSURE((!status && *device_obj != NULL) ||
b66e0986 977 (status && *device_obj == NULL));
7d55524d
ORL
978
979 return status;
980}
981
/*
 * ======== proc_get_state ========
 * Purpose:
 *      Report the state of the specified DSP processor.
 *
 * Maps the bridge driver's board status onto the public proc_state enum.
 * Note that sleep transition and DSP hibernation are reported as
 * PROC_RUNNING.  Unknown board states yield 0xFF and -EPERM.
 */
int proc_get_state(void *hprocessor,
		   struct dsp_processorstate *proc_state_obj,
		   u32 state_info_size)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	int brd_status;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(proc_state_obj != NULL);
	DBC_REQUIRE(state_info_size >= sizeof(struct dsp_processorstate));

	if (p_proc_object) {
		/* First, retrieve BRD state information */
		status = (*p_proc_object->intf_fxns->pfn_brd_status)
		    (p_proc_object->hbridge_context, &brd_status);
		if (!status) {
			switch (brd_status) {
			case BRD_STOPPED:
				proc_state_obj->proc_state = PROC_STOPPED;
				break;
			case BRD_SLEEP_TRANSITION:
			case BRD_DSP_HIBERNATION:
				/* Fall through */
			case BRD_RUNNING:
				proc_state_obj->proc_state = PROC_RUNNING;
				break;
			case BRD_LOADED:
				proc_state_obj->proc_state = PROC_LOADED;
				break;
			case BRD_ERROR:
				proc_state_obj->proc_state = PROC_ERROR;
				break;
			default:
				proc_state_obj->proc_state = 0xFF;
				status = -EPERM;
				break;
			}
		}
	} else {
		status = -EFAULT;
	}
	dev_dbg(bridge, "%s, results: status: 0x%x proc_state_obj: 0x%x\n",
		__func__, status, proc_state_obj->proc_state);
	return status;
}
1033
1034/*
1035 * ======== proc_get_trace ========
1036 * Purpose:
1037 * Retrieve the current contents of the trace buffer, located on the
1038 * Processor. Predefined symbols for the trace buffer must have been
1039 * configured into the DSP executable.
1040 * Details:
1041 * We support using the symbols SYS_PUTCBEG and SYS_PUTCEND to define a
1042 * trace buffer, only. Treat it as an undocumented feature.
1043 * This call is destructive, meaning the processor is placed in the monitor
1044 * state as a result of this function.
1045 */
1046int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size)
1047{
1048 int status;
1049 status = -ENOSYS;
1050 return status;
1051}
1052
1053/*
1054 * ======== proc_init ========
1055 * Purpose:
1056 * Initialize PROC's private state, keeping a reference count on each call
1057 */
1058bool proc_init(void)
1059{
1060 bool ret = true;
1061
1062 DBC_REQUIRE(refs >= 0);
1063
1064 if (ret)
1065 refs++;
1066
1067 DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
1068
1069 return ret;
1070}
1071
1072/*
1073 * ======== proc_load ========
1074 * Purpose:
1075 * Reset a processor and load a new base program image.
1076 * This will be an OEM-only function, and not part of the DSP/BIOS Bridge
1077 * application developer's API.
1078 */
9d7d0a52
MN
int proc_load(void *hprocessor, const s32 argc_index,
	      const char **user_args, const char **user_envp)
{
	/*
	 * Stop the board, place it in monitor state, then load the COFF
	 * image named by user_args[0] through the COD manager.  The target
	 * environment gets "PROC_ID=<n>" prepended.  On any failure the
	 * processor is stopped again (see func_end).
	 */
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct io_mgr *hio_mgr;	/* IO manager handle */
	struct msg_mgr *hmsg_mgr;
	struct cod_manager *cod_mgr;	/* Code manager handle */
	char *pargv0;		/* temp argv[0] ptr */
	char **new_envp;	/* Updated envp[] array. */
	char sz_proc_id[MAXPROCIDLEN];	/* Size of "PROC_ID=<n>" */
	s32 envp_elems;		/* Num elements in envp[]. */
	s32 cnew_envp;		/* "   " in new_envp[] */
	s32 nproc_id = 0;	/* Anticipate MP version. */
	struct dcd_manager *hdcd_handle;
	struct dmm_object *dmm_mgr;
	u32 dw_ext_end;
	u32 proc_id;
	int brd_state;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	struct timeval tv1;
	struct timeval tv2;
#endif

#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;
#endif

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(argc_index > 0);
	DBC_REQUIRE(user_args != NULL);

#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	do_gettimeofday(&tv1);
#endif
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr);
	if (!cod_mgr) {
		status = -EPERM;
		goto func_end;
	}
	/* Loading requires the board to be stopped first. */
	status = proc_stop(hprocessor);
	if (status)
		goto func_end;

	/* Place the board in the monitor state. */
	status = proc_monitor(hprocessor);
	if (status)
		goto func_end;

	/* Save ptr to original argv[0]. */
	pargv0 = (char *)user_args[0];
	/* Prepend "PROC_ID=<nproc_id>" to envp array for target. */
	envp_elems = get_envp_count((char **)user_envp);
	/* envp_elems already includes the NULL slot when non-zero; the
	 * extra entries make room for the PROC_ID string (and a NULL
	 * terminator when there was no source environment). */
	cnew_envp = (envp_elems ? (envp_elems + 1) : (envp_elems + 2));
	new_envp = kzalloc(cnew_envp * sizeof(char **), GFP_KERNEL);
	if (new_envp) {
		status = snprintf(sz_proc_id, MAXPROCIDLEN, PROC_ENVPROCID,
				  nproc_id);
		/* NOTE(review): snprintf() in the kernel does not return -1
		 * on truncation; this overflow check looks ineffective --
		 * confirm against the PROC_ENVPROCID format and
		 * MAXPROCIDLEN. */
		if (status == -1) {
			dev_dbg(bridge, "%s: Proc ID string overflow\n",
				__func__);
			status = -EPERM;
		} else {
			new_envp =
			    prepend_envp(new_envp, (char **)user_envp,
					 envp_elems, cnew_envp, sz_proc_id);
			/* Get the DCD Handle */
			status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
						    (u32 *) &hdcd_handle);
			if (!status) {
				/* Before proceeding with new load,
				 * check if a previously registered COFF
				 * exists.
				 * If yes, unregister nodes in previously
				 * registered COFF.  If any error occurred,
				 * set previously registered COFF to NULL. */
				if (p_proc_object->psz_last_coff != NULL) {
					status =
					    dcd_auto_unregister(hdcd_handle,
								p_proc_object->
								psz_last_coff);
					/* Regardless of auto unregister status,
					 * free previously allocated
					 * memory. */
					kfree(p_proc_object->psz_last_coff);
					p_proc_object->psz_last_coff = NULL;
				}
			}
			/* On success, do cod_open_base() */
			/* NOTE(review): this runs even when
			 * mgr_get_dcd_handle() failed above and overwrites
			 * that status -- confirm intentional. */
			status = cod_open_base(cod_mgr, (char *)user_args[0],
					       COD_SYMB);
		}
	} else {
		status = -ENOMEM;
	}
	if (!status) {
		/* Auto-register data base */
		/* Get the DCD Handle */
		status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
					    (u32 *) &hdcd_handle);
		if (!status) {
			/* Auto register nodes in specified COFF
			 * file.  If registration did not fail,
			 * (status = 0 or -EACCES)
			 * save the name of the COFF file for
			 * de-registration in the future. */
			status =
			    dcd_auto_register(hdcd_handle,
					      (char *)user_args[0]);
			if (status == -EACCES)
				status = 0;

			if (status) {
				status = -EPERM;
			} else {
				DBC_ASSERT(p_proc_object->psz_last_coff ==
					   NULL);
				/* Allocate memory for pszLastCoff */
				p_proc_object->psz_last_coff =
				    kzalloc((strlen(user_args[0]) +
					     1), GFP_KERNEL);
				/* If memory allocated, save COFF file name */
				if (p_proc_object->psz_last_coff) {
					strncpy(p_proc_object->psz_last_coff,
						(char *)user_args[0],
						(strlen((char *)user_args[0]) +
						 1));
				}
			}
		}
	}
	/* Update shared memory address and size */
	if (!status) {
		/* Create the message manager.  This must be done
		 * before calling the IOOnLoaded function. */
		dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
		if (!hmsg_mgr) {
			status = msg_create(&hmsg_mgr, p_proc_object->hdev_obj,
					    (msg_onexit) node_on_exit);
			DBC_ASSERT(!status);
			dev_set_msg_mgr(p_proc_object->hdev_obj, hmsg_mgr);
		}
	}
	if (!status) {
		/* Set the Device object's message manager */
		status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr);
		if (hio_mgr)
			status = (*p_proc_object->intf_fxns->pfn_io_on_loaded)
			    (hio_mgr);
		else
			status = -EFAULT;
	}
	if (!status) {
		/* Now, attempt to load an exec: */

		/* Boost the OPP level to Maximum level supported by baseport */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP5]);
#endif
		status = cod_load_base(cod_mgr, argc_index, (char **)user_args,
				       dev_brd_write_fxn,
				       p_proc_object->hdev_obj, NULL);
		if (status) {
			if (status == -EBADF) {
				dev_dbg(bridge, "%s: Failure to Load the EXE\n",
					__func__);
			}
			if (status == -ESPIPE) {
				pr_err("%s: Couldn't parse the file\n",
				       __func__);
			}
		}
		/* Requesting the lowest opp supported */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif

	}
	if (!status) {
		/* Update the Processor status to loaded */
		status = (*p_proc_object->intf_fxns->pfn_brd_set_state)
		    (p_proc_object->hbridge_context, BRD_LOADED);
		if (!status) {
			p_proc_object->proc_state = PROC_LOADED;
			if (p_proc_object->ntfy_obj)
				proc_notify_clients(p_proc_object,
						    DSP_PROCESSORSTATECHANGE);
		}
	}
	if (!status) {
		status = proc_get_processor_id(hprocessor, &proc_id);
		if (proc_id == DSP_UNIT) {
			/* Use all available DSP address space after EXTMEM
			 * for DMM */
			if (!status)
				status = cod_get_sym_value(cod_mgr, EXTEND,
							   &dw_ext_end);

			/* Reset DMM structs and add an initial free chunk */
			if (!status) {
				status =
				    dev_get_dmm_mgr(p_proc_object->hdev_obj,
						    &dmm_mgr);
				if (dmm_mgr) {
					/* Set dw_ext_end to DMM START u8
					 * address */
					dw_ext_end =
					    (dw_ext_end + 1) * DSPWORDSIZE;
					/* DMM memory is from EXT_END */
					status = dmm_create_tables(dmm_mgr,
								   dw_ext_end,
								   DMMPOOLSIZE);
				} else {
					status = -EFAULT;
				}
			}
		}
	}
	/* Restore the original argv[0] */
	kfree(new_envp);
	user_args[0] = pargv0;
	if (!status) {
		if (!((*p_proc_object->intf_fxns->pfn_brd_status)
		      (p_proc_object->hbridge_context, &brd_state))) {
			pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
			/* Remember the image path so it can be reloaded.
			 * NOTE(review): drv_datap is dereferenced without a
			 * NULL check -- confirm dev_get_drvdata(bridge)
			 * cannot fail here. */
			kfree(drv_datap->base_img);
			drv_datap->base_img = kmalloc(strlen(pargv0) + 1,
						      GFP_KERNEL);
			if (drv_datap->base_img)
				strncpy(drv_datap->base_img, pargv0,
					strlen(pargv0) + 1);
			else
				status = -ENOMEM;
			DBC_ASSERT(brd_state == BRD_LOADED);
		}
	}

func_end:
	if (status) {
		pr_err("%s: Processor failed to load\n", __func__);
		/* proc_stop() tolerates a NULL handle, so this is safe even
		 * on the early -EFAULT path. */
		proc_stop(p_proc_object);
	}
	DBC_ENSURE((!status
		    && p_proc_object->proc_state == PROC_LOADED)
		   || status);
#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	do_gettimeofday(&tv2);
	if (tv2.tv_usec < tv1.tv_usec) {
		tv2.tv_usec += 1000000;
		tv2.tv_sec--;
	}
	dev_dbg(bridge, "%s: time to load %d sec and %d usec\n", __func__,
		tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec);
#endif
	return status;
}
1344
1345/*
1346 * ======== proc_map ========
1347 * Purpose:
1348 * Maps a MPU buffer to DSP address space.
1349 */
1350int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
1351 void *req_addr, void **pp_map_addr, u32 ul_map_attr,
1352 struct process_context *pr_ctxt)
1353{
1354 u32 va_align;
1355 u32 pa_align;
1356 struct dmm_object *dmm_mgr;
1357 u32 size_align;
1358 int status = 0;
1359 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1360 struct dmm_map_object *map_obj;
7d55524d 1361
b3d23688 1362#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
7d55524d
ORL
1363 if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
1364 if (!IS_ALIGNED((u32)pmpu_addr, DSP_CACHE_LINE) ||
1365 !IS_ALIGNED(ul_size, DSP_CACHE_LINE)) {
1366 pr_err("%s: not aligned: 0x%x (%d)\n", __func__,
1367 (u32)pmpu_addr, ul_size);
1368 return -EFAULT;
1369 }
1370 }
1371#endif
1372
1373 /* Calculate the page-aligned PA, VA and size */
1374 va_align = PG_ALIGN_LOW((u32) req_addr, PG_SIZE4K);
1375 pa_align = PG_ALIGN_LOW((u32) pmpu_addr, PG_SIZE4K);
1376 size_align = PG_ALIGN_HIGH(ul_size + (u32) pmpu_addr - pa_align,
1377 PG_SIZE4K);
1378
1379 if (!p_proc_object) {
1380 status = -EFAULT;
1381 goto func_end;
1382 }
1383 /* Critical section */
1384 mutex_lock(&proc_lock);
1385 dmm_get_handle(p_proc_object, &dmm_mgr);
1386 if (dmm_mgr)
1387 status = dmm_map_memory(dmm_mgr, va_align, size_align);
1388 else
1389 status = -EFAULT;
1390
1391 /* Add mapping to the page tables. */
a741ea6e 1392 if (!status) {
7d55524d 1393 /* mapped memory resource tracking */
4dd1944a 1394 map_obj = add_mapping_info(pr_ctxt, pa_align, va_align,
7d55524d 1395 size_align);
4dd1944a 1396 if (!map_obj) {
7d55524d 1397 status = -ENOMEM;
4dd1944a
FGL
1398 } else {
1399 va_align = user_to_dsp_map(
1400 p_proc_object->hbridge_context->dsp_mmu,
1401 pa_align, va_align, size_align,
1402 map_obj->pages);
1403 if (IS_ERR_VALUE(va_align))
1404 status = (int)va_align;
1405 }
7d55524d 1406 }
a741ea6e 1407 if (!status) {
7d55524d 1408 /* Mapped address = MSB of VA | LSB of PA */
4dd1944a
FGL
1409 map_obj->dsp_addr = (va_align |
1410 ((u32)pmpu_addr & (PG_SIZE4K - 1)));
1411 *pp_map_addr = (void *)map_obj->dsp_addr;
7d55524d 1412 } else {
4dd1944a 1413 remove_mapping_information(pr_ctxt, va_align, size_align);
7d55524d
ORL
1414 dmm_un_map_memory(dmm_mgr, va_align, &size_align);
1415 }
1416 mutex_unlock(&proc_lock);
1417
b66e0986 1418 if (status)
7d55524d
ORL
1419 goto func_end;
1420
1421func_end:
1422 dev_dbg(bridge, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, "
1423 "req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, "
1424 "pa_align %x, size_align %x status 0x%x\n", __func__,
1425 hprocessor, pmpu_addr, ul_size, req_addr, ul_map_attr,
1426 pp_map_addr, va_align, pa_align, size_align, status);
1427
1428 return status;
1429}
1430
1431/*
1432 * ======== proc_register_notify ========
1433 * Purpose:
1434 * Register to be notified of specific processor events.
1435 */
int proc_register_notify(void *hprocessor, u32 event_mask,
			 u32 notify_type, struct dsp_notification
			 * hnotification)
{
	/*
	 * Register (or, with event_mask == 0, deregister) a client
	 * notification.  Plain processor events go to the NTFY object;
	 * error events (SYSERROR/MMUFAULT/PWRERROR/WDTOVERFLOW) go to the
	 * DEH manager.
	 */
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct deh_mgr *hdeh_mgr;

	DBC_REQUIRE(hnotification != NULL);
	DBC_REQUIRE(refs > 0);

	/* Check processor handle */
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* Check if event mask is a valid processor related event */
	if (event_mask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH |
			   DSP_PROCESSORDETACH | DSP_PROCESSORRESTART |
			   DSP_MMUFAULT | DSP_SYSERROR | DSP_PWRERROR |
			   DSP_WDTOVERFLOW))
		status = -EINVAL;

	/* Check if notify type is valid */
	if (notify_type != DSP_SIGNALEVENT)
		status = -EINVAL;

	if (!status) {
		/* If event mask is not DSP_SYSERROR, DSP_MMUFAULT,
		 * or DSP_PWRERROR then register event immediately. */
		if (event_mask &
		    ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR |
		      DSP_WDTOVERFLOW)) {
			status = ntfy_register(p_proc_object->ntfy_obj,
					       hnotification, event_mask,
					       notify_type);
			/* Special case alert, special case alert!
			 * If we're trying to *deregister* (i.e. event_mask
			 * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification,
			 * we have to deregister with the DEH manager.
			 * There's no way to know, based on event_mask which
			 * manager the notification event was registered with,
			 * so if we're trying to deregister and ntfy_register
			 * failed, we'll give the deh manager a shot.
			 */
			if ((event_mask == 0) && status) {
				/* NOTE(review): the dev_get_deh_mgr() status
				 * is immediately overwritten and hdeh_mgr is
				 * used unchecked -- confirm it is always
				 * valid here. */
				status =
				    dev_get_deh_mgr(p_proc_object->hdev_obj,
						    &hdeh_mgr);
				status =
				    bridge_deh_register_notify(hdeh_mgr,
							       event_mask,
							       notify_type,
							       hnotification);
			}
		} else {
			/* Error-class events are owned by the DEH manager. */
			status = dev_get_deh_mgr(p_proc_object->hdev_obj,
						 &hdeh_mgr);
			status =
			    bridge_deh_register_notify(hdeh_mgr,
						       event_mask,
						       notify_type,
						       hnotification);

		}
	}
func_end:
	return status;
}
1505
1506/*
1507 * ======== proc_reserve_memory ========
1508 * Purpose:
1509 * Reserve a virtually contiguous region of DSP address space.
1510 */
1511int proc_reserve_memory(void *hprocessor, u32 ul_size,
1512 void **pp_rsv_addr,
1513 struct process_context *pr_ctxt)
1514{
1515 struct dmm_object *dmm_mgr;
1516 int status = 0;
1517 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1518 struct dmm_rsv_object *rsv_obj;
1519
1520 if (!p_proc_object) {
1521 status = -EFAULT;
1522 goto func_end;
1523 }
1524
1525 status = dmm_get_handle(p_proc_object, &dmm_mgr);
1526 if (!dmm_mgr) {
1527 status = -EFAULT;
1528 goto func_end;
1529 }
1530
1531 status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
1532 if (status != 0)
1533 goto func_end;
1534
1535 /*
1536 * A successful reserve should be followed by insertion of rsv_obj
1537 * into dmm_rsv_list, so that reserved memory resource tracking
1538 * remains uptodate
1539 */
1540 rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
1541 if (rsv_obj) {
1542 rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
1543 spin_lock(&pr_ctxt->dmm_rsv_lock);
1544 list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
1545 spin_unlock(&pr_ctxt->dmm_rsv_lock);
1546 }
1547
1548func_end:
1549 dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
1550 "status 0x%x\n", __func__, hprocessor,
1551 ul_size, pp_rsv_addr, status);
1552 return status;
1553}
1554
1555/*
1556 * ======== proc_start ========
1557 * Purpose:
1558 * Start a processor running.
1559 */
int proc_start(void *hprocessor)
{
	/*
	 * Start a previously loaded image running: fetch the entry point
	 * from the COD manager, call the bridge's brd_start, then create
	 * the level-2 device objects (node manager etc.).  Requires the
	 * processor to be in PROC_LOADED state.
	 */
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct cod_manager *cod_mgr;	/* Code manager handle */
	u32 dw_dsp_addr;	/* Loaded code's entry point. */
	int brd_state;

	DBC_REQUIRE(refs > 0);
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* Call the bridge_brd_start */
	if (p_proc_object->proc_state != PROC_LOADED) {
		status = -EBADR;
		goto func_end;
	}
	status = dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr);
	if (!cod_mgr) {
		status = -EFAULT;
		goto func_cont;
	}

	status = cod_get_entry(cod_mgr, &dw_dsp_addr);
	if (status)
		goto func_cont;

	status = (*p_proc_object->intf_fxns->pfn_brd_start)
	    (p_proc_object->hbridge_context, dw_dsp_addr);
	if (status)
		goto func_cont;

	/* Call dev_create2 */
	status = dev_create2(p_proc_object->hdev_obj);
	if (!status) {
		p_proc_object->proc_state = PROC_RUNNING;
		/* Deep sleep switches off the peripheral clocks.
		 * we just put the DSP CPU in idle in the idle loop.
		 * so there is no need to send a command to DSP */

		if (p_proc_object->ntfy_obj) {
			proc_notify_clients(p_proc_object,
					    DSP_PROCESSORSTATECHANGE);
		}
	} else {
		/* Failed to Create Node Manager and DISP Object
		 * Stop the Processor from running. Put it in STOPPED State */
		(void)(*p_proc_object->intf_fxns->
		       pfn_brd_stop) (p_proc_object->hbridge_context);
		p_proc_object->proc_state = PROC_STOPPED;
	}
func_cont:
	if (!status) {
		if (!((*p_proc_object->intf_fxns->pfn_brd_status)
		      (p_proc_object->hbridge_context, &brd_state))) {
			pr_info("%s: dsp in running state\n", __func__);
			DBC_ASSERT(brd_state != BRD_HIBERNATION);
		}
	} else {
		/* Any failure leaves the board stopped. */
		pr_err("%s: Failed to start the dsp\n", __func__);
		proc_stop(p_proc_object);
	}

func_end:
	DBC_ENSURE((!status && p_proc_object->proc_state ==
		    PROC_RUNNING) || status);
	return status;
}
1629
1630/*
1631 * ======== proc_stop ========
1632 * Purpose:
1633 * Stop a processor running.
1634 */
int proc_stop(void *hprocessor)
{
	/*
	 * Stop a running processor.  Refuses (-EBADR) if any nodes are
	 * still allocated on the DSP; otherwise calls the bridge's
	 * brd_stop and tears down the level-2 device objects and the
	 * message manager.
	 */
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct msg_mgr *hmsg_mgr;
	struct node_mgr *hnode_mgr;
	void *hnode;
	u32 node_tab_size = 1;
	u32 num_nodes = 0;
	u32 nodes_allocated = 0;
	int brd_state;

	DBC_REQUIRE(refs > 0);
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* check if there are any running nodes */
	status = dev_get_node_manager(p_proc_object->hdev_obj, &hnode_mgr);
	if (!status && hnode_mgr) {
		status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size,
					 &num_nodes, &nodes_allocated);
		if ((status == -EINVAL) || (nodes_allocated > 0)) {
			pr_err("%s: Can't stop device, active nodes = %d \n",
			       __func__, nodes_allocated);
			return -EBADR;
		}
	}
	/* Call the bridge_brd_stop */
	/* It is OK to stop a device that doesn't have nodes OR not started */
	status =
	    (*p_proc_object->intf_fxns->
	     pfn_brd_stop) (p_proc_object->hbridge_context);
	if (!status) {
		dev_dbg(bridge, "%s: processor in standby mode\n", __func__);
		p_proc_object->proc_state = PROC_STOPPED;
		/* Destroy the Node Manager, msg_ctrl Manager */
		if (!(dev_destroy2(p_proc_object->hdev_obj))) {
			/* Destroy the msg_ctrl by calling msg_delete */
			dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
			if (hmsg_mgr) {
				msg_delete(hmsg_mgr);
				dev_set_msg_mgr(p_proc_object->hdev_obj, NULL);
			}
			/* Sanity-check that the board really stopped. */
			if (!((*p_proc_object->
			       intf_fxns->pfn_brd_status) (p_proc_object->
							   hbridge_context,
							   &brd_state)))
				DBC_ASSERT(brd_state == BRD_STOPPED);
		}
	} else {
		pr_err("%s: Failed to stop the processor\n", __func__);
	}
func_end:

	return status;
}
1692
1693/*
1694 * ======== proc_un_map ========
1695 * Purpose:
1696 * Removes a MPU buffer mapping from the DSP address space.
1697 */
int proc_un_map(void *hprocessor, void *map_addr,
		struct process_context *pr_ctxt)
{
	/*
	 * Remove an MPU-buffer mapping from the DSP address space: release
	 * the DMM range, unprogram the DSP MMU, and drop the tracking
	 * entry from the process context.  Inverse of proc_map().
	 */
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct dmm_object *dmm_mgr;
	u32 va_align;
	u32 size_align;

	va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}

	status = dmm_get_handle(hprocessor, &dmm_mgr);
	if (!dmm_mgr) {
		status = -EFAULT;
		goto func_end;
	}

	/* Critical section */
	mutex_lock(&proc_lock);
	/*
	 * Update DMM structures. Get the size to unmap.
	 * This function returns error if the VA is not mapped
	 */
	status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
	/* Remove mapping from the page tables. */
	if (!status)
		status = user_to_dsp_unmap(
			p_proc_object->hbridge_context->dsp_mmu, va_align);

	mutex_unlock(&proc_lock);
	if (status)
		goto func_end;

	/*
	 * A successful unmap should be followed by removal of map_obj
	 * from dmm_map_list, so that mapped memory resource tracking
	 * remains uptodate
	 */
	/* NOTE(review): this passes the raw map_addr while proc_map()
	 * tracked the mapping by aligned addresses -- confirm
	 * remove_mapping_information() matches entries by this key. */
	remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);

func_end:
	dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
		__func__, hprocessor, map_addr, status);
	return status;
}
1747
1748/*
1749 * ======== proc_un_reserve_memory ========
1750 * Purpose:
1751 * Frees a previously reserved region of DSP address space.
1752 */
1753int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
1754 struct process_context *pr_ctxt)
1755{
1756 struct dmm_object *dmm_mgr;
1757 int status = 0;
1758 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1759 struct dmm_rsv_object *rsv_obj;
1760
1761 if (!p_proc_object) {
1762 status = -EFAULT;
1763 goto func_end;
1764 }
1765
1766 status = dmm_get_handle(p_proc_object, &dmm_mgr);
1767 if (!dmm_mgr) {
1768 status = -EFAULT;
1769 goto func_end;
1770 }
1771
1772 status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
1773 if (status != 0)
1774 goto func_end;
1775
1776 /*
1777 * A successful unreserve should be followed by removal of rsv_obj
1778 * from dmm_rsv_list, so that reserved memory resource tracking
1779 * remains uptodate
1780 */
1781 spin_lock(&pr_ctxt->dmm_rsv_lock);
1782 list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
1783 if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
1784 list_del(&rsv_obj->link);
1785 kfree(rsv_obj);
1786 break;
1787 }
1788 }
1789 spin_unlock(&pr_ctxt->dmm_rsv_lock);
1790
1791func_end:
1792 dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
1793 __func__, hprocessor, prsv_addr, status);
1794 return status;
1795}
1796
1797/*
 * ======== proc_monitor ========
1799 * Purpose:
1800 * Place the Processor in Monitor State. This is an internal
1801 * function and a requirement before Processor is loaded.
1802 * This does a bridge_brd_stop, dev_destroy2 and bridge_brd_monitor.
1803 * In dev_destroy2 we delete the node manager.
1804 * Parameters:
1805 * p_proc_object: Pointer to Processor Object
1806 * Returns:
1807 * 0: Processor placed in monitor mode.
1808 * !0: Failed to place processor in monitor mode.
1809 * Requires:
1810 * Valid Processor Handle
1811 * Ensures:
1812 * Success: ProcObject state is PROC_IDLE
1813 */
c8c1ad8c 1814static int proc_monitor(struct proc_object *proc_obj)
7d55524d
ORL
1815{
1816 int status = -EPERM;
1817 struct msg_mgr *hmsg_mgr;
1818 int brd_state;
1819
1820 DBC_REQUIRE(refs > 0);
c8c1ad8c 1821 DBC_REQUIRE(proc_obj);
7d55524d
ORL
1822
1823 /* This is needed only when Device is loaded when it is
1824 * already 'ACTIVE' */
1825 /* Destory the Node Manager, msg_ctrl Manager */
a741ea6e 1826 if (!dev_destroy2(proc_obj->hdev_obj)) {
7d55524d 1827 /* Destroy the msg_ctrl by calling msg_delete */
c8c1ad8c 1828 dev_get_msg_mgr(proc_obj->hdev_obj, &hmsg_mgr);
7d55524d
ORL
1829 if (hmsg_mgr) {
1830 msg_delete(hmsg_mgr);
c8c1ad8c 1831 dev_set_msg_mgr(proc_obj->hdev_obj, NULL);
7d55524d
ORL
1832 }
1833 }
1834 /* Place the Board in the Monitor State */
a741ea6e 1835 if (!((*proc_obj->intf_fxns->pfn_brd_monitor)
c8c1ad8c 1836 (proc_obj->hbridge_context))) {
7d55524d 1837 status = 0;
a741ea6e 1838 if (!((*proc_obj->intf_fxns->pfn_brd_status)
c8c1ad8c 1839 (proc_obj->hbridge_context, &brd_state)))
7d55524d
ORL
1840 DBC_ASSERT(brd_state == BRD_IDLE);
1841 }
1842
a741ea6e 1843 DBC_ENSURE((!status && brd_state == BRD_IDLE) ||
b66e0986 1844 status);
7d55524d
ORL
1845 return status;
1846}
1847
1848/*
1849 * ======== get_envp_count ========
1850 * Purpose:
1851 * Return the number of elements in the envp array, including the
1852 * terminating NULL element.
1853 */
1854static s32 get_envp_count(char **envp)
1855{
1856 s32 ret = 0;
1857 if (envp) {
1858 while (*envp++)
1859 ret++;
1860
1861 ret += 1; /* Include the terminating NULL in the count. */
1862 }
1863
1864 return ret;
1865}
1866
1867/*
1868 * ======== prepend_envp ========
1869 * Purpose:
1870 * Prepend an environment variable=value pair to the new envp array, and
1871 * copy in the existing var=value pairs in the old envp array.
1872 */
1873static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
0cd343a4 1874 s32 cnew_envp, char *sz_var)
7d55524d
ORL
1875{
1876 char **pp_envp = new_envp;
1877
1878 DBC_REQUIRE(new_envp);
1879
1880 /* Prepend new environ var=value string */
0cd343a4 1881 *new_envp++ = sz_var;
7d55524d
ORL
1882
1883 /* Copy user's environment into our own. */
1884 while (envp_elems--)
1885 *new_envp++ = *envp++;
1886
1887 /* Ensure NULL terminates the new environment strings array. */
1888 if (envp_elems == 0)
1889 *new_envp = NULL;
1890
1891 return pp_envp;
1892}
1893
1894/*
1895 * ======== proc_notify_clients ========
1896 * Purpose:
1897 * Notify the processor the events.
1898 */
0cd343a4 1899int proc_notify_clients(void *proc, u32 events)
7d55524d
ORL
1900{
1901 int status = 0;
e6890692 1902 struct proc_object *p_proc_object = (struct proc_object *)proc;
7d55524d
ORL
1903
1904 DBC_REQUIRE(p_proc_object);
bf968b0a 1905 DBC_REQUIRE(is_valid_proc_event(events));
7d55524d
ORL
1906 DBC_REQUIRE(refs > 0);
1907 if (!p_proc_object) {
1908 status = -EFAULT;
1909 goto func_end;
1910 }
1911
0cd343a4 1912 ntfy_notify(p_proc_object->ntfy_obj, events);
7d55524d
ORL
1913func_end:
1914 return status;
1915}
1916
1917/*
1918 * ======== proc_notify_all_clients ========
1919 * Purpose:
1920 * Notify the processor the events. This includes notifying all clients
 * attached to a particular DSP.
1922 */
0cd343a4 1923int proc_notify_all_clients(void *proc, u32 events)
7d55524d
ORL
1924{
1925 int status = 0;
e6890692 1926 struct proc_object *p_proc_object = (struct proc_object *)proc;
7d55524d 1927
bf968b0a 1928 DBC_REQUIRE(is_valid_proc_event(events));
7d55524d
ORL
1929 DBC_REQUIRE(refs > 0);
1930
1931 if (!p_proc_object) {
1932 status = -EFAULT;
1933 goto func_end;
1934 }
1935
0cd343a4 1936 dev_notify_clients(p_proc_object->hdev_obj, events);
7d55524d
ORL
1937
1938func_end:
1939 return status;
1940}
1941
1942/*
1943 * ======== proc_get_processor_id ========
1944 * Purpose:
1945 * Retrieves the processor ID.
1946 */
13b18c29 1947int proc_get_processor_id(void *proc, u32 * proc_id)
7d55524d
ORL
1948{
1949 int status = 0;
e6890692 1950 struct proc_object *p_proc_object = (struct proc_object *)proc;
7d55524d
ORL
1951
1952 if (p_proc_object)
13b18c29 1953 *proc_id = p_proc_object->processor_id;
7d55524d
ORL
1954 else
1955 status = -EFAULT;
1956
1957 return status;
1958}