]>
Commit | Line | Data |
---|---|---|
7d55524d ORL |
1 | /* |
2 | * proc.c | |
3 | * | |
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | |
5 | * | |
6 | * Processor interface at the driver level. | |
7 | * | |
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | |
9 | * | |
10 | * This package is free software; you can redistribute it and/or modify | |
11 | * it under the terms of the GNU General Public License version 2 as | |
12 | * published by the Free Software Foundation. | |
13 | * | |
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | |
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | |
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | |
17 | */ | |
18 | ||
19 | /* ------------------------------------ Host OS */ | |
20 | #include <linux/dma-mapping.h> | |
21 | #include <linux/scatterlist.h> | |
22 | #include <dspbridge/host_os.h> | |
23 | ||
24 | /* ----------------------------------- DSP/BIOS Bridge */ | |
25 | #include <dspbridge/std.h> | |
26 | #include <dspbridge/dbdefs.h> | |
27 | ||
28 | /* ----------------------------------- Trace & Debug */ | |
29 | #include <dspbridge/dbc.h> | |
30 | ||
31 | /* ----------------------------------- OS Adaptation Layer */ | |
32 | #include <dspbridge/cfg.h> | |
33 | #include <dspbridge/list.h> | |
34 | #include <dspbridge/ntfy.h> | |
35 | #include <dspbridge/sync.h> | |
36 | /* ----------------------------------- Bridge Driver */ | |
37 | #include <dspbridge/dspdefs.h> | |
38 | #include <dspbridge/dspdeh.h> | |
39 | /* ----------------------------------- Platform Manager */ | |
40 | #include <dspbridge/cod.h> | |
41 | #include <dspbridge/dev.h> | |
42 | #include <dspbridge/procpriv.h> | |
43 | #include <dspbridge/dmm.h> | |
44 | ||
45 | /* ----------------------------------- Resource Manager */ | |
46 | #include <dspbridge/mgr.h> | |
47 | #include <dspbridge/node.h> | |
48 | #include <dspbridge/nldr.h> | |
49 | #include <dspbridge/rmm.h> | |
50 | ||
51 | /* ----------------------------------- Others */ | |
52 | #include <dspbridge/dbdcd.h> | |
53 | #include <dspbridge/msg.h> | |
54 | #include <dspbridge/dspioctl.h> | |
55 | #include <dspbridge/drv.h> | |
56 | ||
57 | /* ----------------------------------- This */ | |
58 | #include <dspbridge/proc.h> | |
59 | #include <dspbridge/pwr.h> | |
60 | ||
61 | #include <dspbridge/resourcecleanup.h> | |
62 | /* ----------------------------------- Defines, Data Structures, Typedefs */ | |
63 | #define MAXCMDLINELEN 255 | |
64 | #define PROC_ENVPROCID "PROC_ID=%d" | |
65 | #define MAXPROCIDLEN (8 + 5) | |
66 | #define PROC_DFLT_TIMEOUT 10000 /* Time out in milliseconds */ | |
67 | #define PWR_TIMEOUT 500 /* Sleep/wake timout in msec */ | |
68 | #define EXTEND "_EXT_END" /* Extmem end addr in DSP binary */ | |
69 | ||
70 | #define DSP_CACHE_LINE 128 | |
71 | ||
72 | #define BUFMODE_MASK (3 << 14) | |
73 | ||
74 | /* Buffer modes from DSP perspective */ | |
75 | #define RBUF 0x4000 /* Input buffer */ | |
76 | #define WBUF 0x8000 /* Output Buffer */ | |
77 | ||
78 | extern struct device *bridge; | |
79 | ||
80 | /* ----------------------------------- Globals */ | |
81 | ||
/*
 * The proc_object structure: per-attachment state for one DSP/IVA
 * processor.  One instance is created by proc_attach() (or a dummy one
 * by proc_auto_start()) and freed by proc_detach().
 */
struct proc_object {
	struct list_head link;	/* Link to next proc_object */
	struct dev_object *hdev_obj;	/* Device this PROC represents */
	u32 process;	/* Process owning this Processor (stored as TGID) */
	struct mgr_object *hmgr_obj;	/* Manager Object Handle */
	u32 attach_count;	/* Processor attach count */
	u32 processor_id;	/* Processor number */
	u32 utimeout;	/* Time out count */
	enum dsp_procstate proc_state;	/* Processor state */
	u32 ul_unit;	/* DDSP unit number */
	bool is_already_attached;	/*
					 * True if the Device below has
					 * GPP Client attached
					 */
	struct ntfy_object *ntfy_obj;	/* Manages notifications */
	/* Bridge Context Handle */
	struct bridge_dev_context *hbridge_context;
	/* Function interface to Bridge driver */
	struct bridge_drv_interface *intf_fxns;
	char *psz_last_coff;	/* Path of last COFF loaded; kfree'd on detach */
	struct list_head proc_list;
};
105 | ||
106 | static u32 refs; | |
107 | ||
108 | DEFINE_MUTEX(proc_lock); /* For critical sections */ | |
109 | ||
110 | /* ----------------------------------- Function Prototypes */ | |
111 | static int proc_monitor(struct proc_object *hprocessor); | |
112 | static s32 get_envp_count(char **envp); | |
113 | static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems, | |
114 | s32 cnew_envp, char *szVar); | |
115 | ||
116 | /* remember mapping information */ | |
117 | static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt, | |
118 | u32 mpu_addr, u32 dsp_addr, u32 size) | |
119 | { | |
120 | struct dmm_map_object *map_obj; | |
121 | ||
122 | u32 num_usr_pgs = size / PG_SIZE4K; | |
123 | ||
124 | pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n", | |
125 | __func__, mpu_addr, | |
126 | dsp_addr, size); | |
127 | ||
128 | map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL); | |
129 | if (!map_obj) { | |
130 | pr_err("%s: kzalloc failed\n", __func__); | |
131 | return NULL; | |
132 | } | |
133 | INIT_LIST_HEAD(&map_obj->link); | |
134 | ||
135 | map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *), | |
136 | GFP_KERNEL); | |
137 | if (!map_obj->pages) { | |
138 | pr_err("%s: kzalloc failed\n", __func__); | |
139 | kfree(map_obj); | |
140 | return NULL; | |
141 | } | |
142 | ||
143 | map_obj->mpu_addr = mpu_addr; | |
144 | map_obj->dsp_addr = dsp_addr; | |
145 | map_obj->size = size; | |
146 | map_obj->num_usr_pgs = num_usr_pgs; | |
147 | ||
148 | spin_lock(&pr_ctxt->dmm_map_lock); | |
149 | list_add(&map_obj->link, &pr_ctxt->dmm_map_list); | |
150 | spin_unlock(&pr_ctxt->dmm_map_lock); | |
151 | ||
152 | return map_obj; | |
153 | } | |
154 | ||
155 | static int match_exact_map_obj(struct dmm_map_object *map_obj, | |
156 | u32 dsp_addr, u32 size) | |
157 | { | |
158 | if (map_obj->dsp_addr == dsp_addr && map_obj->size != size) | |
159 | pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n", | |
160 | __func__, dsp_addr, map_obj->size, size); | |
161 | ||
162 | return map_obj->dsp_addr == dsp_addr && | |
163 | map_obj->size == size; | |
164 | } | |
165 | ||
/*
 * Remove (and free) the mapping-info entry whose DSP address and size
 * exactly match the given pair; logs an error if no entry matches.
 * The list walk, unlink and frees all happen under dmm_map_lock.
 */
static void remove_mapping_information(struct process_context *pr_ctxt,
						u32 dsp_addr, u32 size)
{
	struct dmm_map_object *map_obj;

	pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
							dsp_addr, size);

	spin_lock(&pr_ctxt->dmm_map_lock);
	list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
		pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
							__func__,
							map_obj->mpu_addr,
							map_obj->dsp_addr,
							map_obj->size);

		if (match_exact_map_obj(map_obj, dsp_addr, size)) {
			pr_debug("%s: match, deleting map info\n", __func__);
			list_del(&map_obj->link);
			/* free any leftover sg table from an unfinished DMA */
			kfree(map_obj->dma_info.sg);
			kfree(map_obj->pages);
			kfree(map_obj);
			goto out;
		}
		pr_debug("%s: candidate didn't match\n", __func__);
	}

	/* reached only when the loop completed without a match */
	pr_err("%s: failed to find given map info\n", __func__);
out:
	spin_unlock(&pr_ctxt->dmm_map_lock);
}
197 | ||
198 | static int match_containing_map_obj(struct dmm_map_object *map_obj, | |
199 | u32 mpu_addr, u32 size) | |
200 | { | |
201 | u32 map_obj_end = map_obj->mpu_addr + map_obj->size; | |
202 | ||
203 | return mpu_addr >= map_obj->mpu_addr && | |
204 | mpu_addr + size <= map_obj_end; | |
205 | } | |
206 | ||
207 | static struct dmm_map_object *find_containing_mapping( | |
208 | struct process_context *pr_ctxt, | |
209 | u32 mpu_addr, u32 size) | |
210 | { | |
211 | struct dmm_map_object *map_obj; | |
212 | pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__, | |
213 | mpu_addr, size); | |
214 | ||
215 | spin_lock(&pr_ctxt->dmm_map_lock); | |
216 | list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { | |
217 | pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n", | |
218 | __func__, | |
219 | map_obj->mpu_addr, | |
220 | map_obj->dsp_addr, | |
221 | map_obj->size); | |
222 | if (match_containing_map_obj(map_obj, mpu_addr, size)) { | |
223 | pr_debug("%s: match!\n", __func__); | |
224 | goto out; | |
225 | } | |
226 | ||
227 | pr_debug("%s: no match!\n", __func__); | |
228 | } | |
229 | ||
230 | map_obj = NULL; | |
231 | out: | |
232 | spin_unlock(&pr_ctxt->dmm_map_lock); | |
233 | return map_obj; | |
234 | } | |
235 | ||
236 | static int find_first_page_in_cache(struct dmm_map_object *map_obj, | |
237 | unsigned long mpu_addr) | |
238 | { | |
239 | u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT; | |
240 | u32 requested_base_page = mpu_addr >> PAGE_SHIFT; | |
241 | int pg_index = requested_base_page - mapped_base_page; | |
242 | ||
243 | if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) { | |
244 | pr_err("%s: failed (got %d)\n", __func__, pg_index); | |
245 | return -1; | |
246 | } | |
247 | ||
248 | pr_debug("%s: first page is %d\n", __func__, pg_index); | |
249 | return pg_index; | |
250 | } | |
251 | ||
252 | static inline struct page *get_mapping_page(struct dmm_map_object *map_obj, | |
253 | int pg_i) | |
254 | { | |
255 | pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__, | |
256 | pg_i, map_obj->num_usr_pgs); | |
257 | ||
258 | if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) { | |
259 | pr_err("%s: requested pg_i %d is out of mapped range\n", | |
260 | __func__, pg_i); | |
261 | return NULL; | |
262 | } | |
263 | ||
264 | return map_obj->pages[pg_i]; | |
265 | } | |
266 | ||
267 | /* | |
268 | * ======== proc_attach ======== | |
269 | * Purpose: | |
270 | * Prepare for communication with a particular DSP processor, and return | |
271 | * a handle to the processor object. | |
272 | */ | |
273 | int | |
274 | proc_attach(u32 processor_id, | |
275 | OPTIONAL CONST struct dsp_processorattrin *attr_in, | |
276 | void **ph_processor, struct process_context *pr_ctxt) | |
277 | { | |
278 | int status = 0; | |
279 | struct dev_object *hdev_obj; | |
280 | struct proc_object *p_proc_object = NULL; | |
281 | struct mgr_object *hmgr_obj = NULL; | |
282 | struct drv_object *hdrv_obj = NULL; | |
283 | u8 dev_type; | |
284 | ||
285 | DBC_REQUIRE(refs > 0); | |
286 | DBC_REQUIRE(ph_processor != NULL); | |
287 | ||
288 | if (pr_ctxt->hprocessor) { | |
289 | *ph_processor = pr_ctxt->hprocessor; | |
290 | return status; | |
291 | } | |
292 | ||
293 | /* Get the Driver and Manager Object Handles */ | |
294 | status = cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT); | |
295 | if (DSP_SUCCEEDED(status)) | |
296 | status = cfg_get_object((u32 *) &hmgr_obj, REG_MGR_OBJECT); | |
297 | ||
298 | if (DSP_SUCCEEDED(status)) { | |
299 | /* Get the Device Object */ | |
300 | status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj); | |
301 | } | |
302 | if (DSP_SUCCEEDED(status)) | |
303 | status = dev_get_dev_type(hdev_obj, &dev_type); | |
304 | ||
305 | if (DSP_FAILED(status)) | |
306 | goto func_end; | |
307 | ||
308 | /* If we made it this far, create the Proceesor object: */ | |
309 | p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL); | |
310 | /* Fill out the Processor Object: */ | |
311 | if (p_proc_object == NULL) { | |
312 | status = -ENOMEM; | |
313 | goto func_end; | |
314 | } | |
315 | p_proc_object->hdev_obj = hdev_obj; | |
316 | p_proc_object->hmgr_obj = hmgr_obj; | |
317 | p_proc_object->processor_id = dev_type; | |
318 | /* Store TGID instead of process handle */ | |
319 | p_proc_object->process = current->tgid; | |
320 | ||
321 | INIT_LIST_HEAD(&p_proc_object->proc_list); | |
322 | ||
323 | if (attr_in) | |
324 | p_proc_object->utimeout = attr_in->utimeout; | |
325 | else | |
326 | p_proc_object->utimeout = PROC_DFLT_TIMEOUT; | |
327 | ||
328 | status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns); | |
329 | if (DSP_SUCCEEDED(status)) { | |
330 | status = dev_get_bridge_context(hdev_obj, | |
331 | &p_proc_object->hbridge_context); | |
332 | if (DSP_FAILED(status)) | |
333 | kfree(p_proc_object); | |
334 | } else | |
335 | kfree(p_proc_object); | |
336 | ||
337 | if (DSP_FAILED(status)) | |
338 | goto func_end; | |
339 | ||
340 | /* Create the Notification Object */ | |
341 | /* This is created with no event mask, no notify mask | |
342 | * and no valid handle to the notification. They all get | |
343 | * filled up when proc_register_notify is called */ | |
344 | p_proc_object->ntfy_obj = kmalloc(sizeof(struct ntfy_object), | |
345 | GFP_KERNEL); | |
346 | if (p_proc_object->ntfy_obj) | |
347 | ntfy_init(p_proc_object->ntfy_obj); | |
348 | else | |
349 | status = -ENOMEM; | |
350 | ||
351 | if (DSP_SUCCEEDED(status)) { | |
352 | /* Insert the Processor Object into the DEV List. | |
353 | * Return handle to this Processor Object: | |
354 | * Find out if the Device is already attached to a | |
355 | * Processor. If so, return AlreadyAttached status */ | |
356 | lst_init_elem(&p_proc_object->link); | |
357 | status = dev_insert_proc_object(p_proc_object->hdev_obj, | |
358 | (u32) p_proc_object, | |
359 | &p_proc_object-> | |
360 | is_already_attached); | |
361 | if (DSP_SUCCEEDED(status)) { | |
362 | if (p_proc_object->is_already_attached) | |
363 | status = 0; | |
364 | } else { | |
365 | if (p_proc_object->ntfy_obj) { | |
366 | ntfy_delete(p_proc_object->ntfy_obj); | |
367 | kfree(p_proc_object->ntfy_obj); | |
368 | } | |
369 | ||
370 | kfree(p_proc_object); | |
371 | } | |
372 | if (DSP_SUCCEEDED(status)) { | |
373 | *ph_processor = (void *)p_proc_object; | |
374 | pr_ctxt->hprocessor = *ph_processor; | |
375 | (void)proc_notify_clients(p_proc_object, | |
376 | DSP_PROCESSORATTACH); | |
377 | } | |
378 | } else { | |
379 | /* Don't leak memory if DSP_FAILED */ | |
380 | kfree(p_proc_object); | |
381 | } | |
382 | func_end: | |
383 | DBC_ENSURE((status == -EPERM && *ph_processor == NULL) || | |
384 | (DSP_SUCCEEDED(status) && p_proc_object) || | |
385 | (status == 0 && p_proc_object)); | |
386 | ||
387 | return status; | |
388 | } | |
389 | ||
390 | static int get_exec_file(struct cfg_devnode *dev_node_obj, | |
391 | struct dev_object *hdev_obj, | |
392 | u32 size, char *execFile) | |
393 | { | |
394 | u8 dev_type; | |
395 | s32 len; | |
396 | ||
397 | dev_get_dev_type(hdev_obj, (u8 *) &dev_type); | |
398 | if (dev_type == DSP_UNIT) { | |
399 | return cfg_get_exec_file(dev_node_obj, size, execFile); | |
400 | } else if (dev_type == IVA_UNIT) { | |
401 | if (iva_img) { | |
402 | len = strlen(iva_img); | |
403 | strncpy(execFile, iva_img, len + 1); | |
404 | return 0; | |
405 | } | |
406 | } | |
407 | return -ENOENT; | |
408 | } | |
409 | ||
410 | /* | |
411 | * ======== proc_auto_start ======== = | |
412 | * Purpose: | |
413 | * A Particular device gets loaded with the default image | |
414 | * if the AutoStart flag is set. | |
415 | * Parameters: | |
416 | * hdev_obj: Handle to the Device | |
417 | * Returns: | |
418 | * 0: On Successful Loading | |
419 | * -EPERM General Failure | |
420 | * Requires: | |
421 | * hdev_obj != NULL | |
422 | * Ensures: | |
423 | */ | |
424 | int proc_auto_start(struct cfg_devnode *dev_node_obj, | |
425 | struct dev_object *hdev_obj) | |
426 | { | |
427 | int status = -EPERM; | |
428 | struct proc_object *p_proc_object; | |
429 | char sz_exec_file[MAXCMDLINELEN]; | |
430 | char *argv[2]; | |
431 | struct mgr_object *hmgr_obj = NULL; | |
432 | u8 dev_type; | |
433 | ||
434 | DBC_REQUIRE(refs > 0); | |
435 | DBC_REQUIRE(dev_node_obj != NULL); | |
436 | DBC_REQUIRE(hdev_obj != NULL); | |
437 | ||
438 | /* Create a Dummy PROC Object */ | |
439 | status = cfg_get_object((u32 *) &hmgr_obj, REG_MGR_OBJECT); | |
440 | if (DSP_FAILED(status)) | |
441 | goto func_end; | |
442 | ||
443 | p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL); | |
444 | if (p_proc_object == NULL) { | |
445 | status = -ENOMEM; | |
446 | goto func_end; | |
447 | } | |
448 | p_proc_object->hdev_obj = hdev_obj; | |
449 | p_proc_object->hmgr_obj = hmgr_obj; | |
450 | status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns); | |
451 | if (DSP_SUCCEEDED(status)) | |
452 | status = dev_get_bridge_context(hdev_obj, | |
453 | &p_proc_object->hbridge_context); | |
454 | if (DSP_FAILED(status)) | |
455 | goto func_cont; | |
456 | ||
457 | /* Stop the Device, put it into standby mode */ | |
458 | status = proc_stop(p_proc_object); | |
459 | ||
460 | if (DSP_FAILED(status)) | |
461 | goto func_cont; | |
462 | ||
463 | /* Get the default executable for this board... */ | |
464 | dev_get_dev_type(hdev_obj, (u8 *) &dev_type); | |
465 | p_proc_object->processor_id = dev_type; | |
466 | status = get_exec_file(dev_node_obj, hdev_obj, sizeof(sz_exec_file), | |
467 | sz_exec_file); | |
468 | if (DSP_SUCCEEDED(status)) { | |
469 | argv[0] = sz_exec_file; | |
470 | argv[1] = NULL; | |
471 | /* ...and try to load it: */ | |
472 | status = proc_load(p_proc_object, 1, (CONST char **)argv, NULL); | |
473 | if (DSP_SUCCEEDED(status)) | |
474 | status = proc_start(p_proc_object); | |
475 | } | |
476 | kfree(p_proc_object->psz_last_coff); | |
477 | p_proc_object->psz_last_coff = NULL; | |
478 | func_cont: | |
479 | kfree(p_proc_object); | |
480 | func_end: | |
481 | return status; | |
482 | } | |
483 | ||
484 | /* | |
485 | * ======== proc_ctrl ======== | |
486 | * Purpose: | |
487 | * Pass control information to the GPP device driver managing the | |
488 | * DSP processor. | |
489 | * | |
490 | * This will be an OEM-only function, and not part of the DSP/BIOS Bridge | |
491 | * application developer's API. | |
492 | * Call the bridge_dev_ctrl fxn with the Argument. This is a Synchronous | |
493 | * Operation. arg can be null. | |
494 | */ | |
495 | int proc_ctrl(void *hprocessor, u32 dw_cmd, IN struct dsp_cbdata * arg) | |
496 | { | |
497 | int status = 0; | |
498 | struct proc_object *p_proc_object = hprocessor; | |
499 | u32 timeout = 0; | |
500 | ||
501 | DBC_REQUIRE(refs > 0); | |
502 | ||
503 | if (p_proc_object) { | |
504 | /* intercept PWR deep sleep command */ | |
505 | if (dw_cmd == BRDIOCTL_DEEPSLEEP) { | |
506 | timeout = arg->cb_data; | |
507 | status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout); | |
508 | } | |
509 | /* intercept PWR emergency sleep command */ | |
510 | else if (dw_cmd == BRDIOCTL_EMERGENCYSLEEP) { | |
511 | timeout = arg->cb_data; | |
512 | status = pwr_sleep_dsp(PWR_EMERGENCYDEEPSLEEP, timeout); | |
513 | } else if (dw_cmd == PWR_DEEPSLEEP) { | |
514 | /* timeout = arg->cb_data; */ | |
515 | status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout); | |
516 | } | |
517 | /* intercept PWR wake commands */ | |
518 | else if (dw_cmd == BRDIOCTL_WAKEUP) { | |
519 | timeout = arg->cb_data; | |
520 | status = pwr_wake_dsp(timeout); | |
521 | } else if (dw_cmd == PWR_WAKEUP) { | |
522 | /* timeout = arg->cb_data; */ | |
523 | status = pwr_wake_dsp(timeout); | |
524 | } else | |
525 | if (DSP_SUCCEEDED((*p_proc_object->intf_fxns->pfn_dev_cntrl) | |
526 | (p_proc_object->hbridge_context, dw_cmd, | |
527 | arg))) { | |
528 | status = 0; | |
529 | } else { | |
530 | status = -EPERM; | |
531 | } | |
532 | } else { | |
533 | status = -EFAULT; | |
534 | } | |
535 | ||
536 | return status; | |
537 | } | |
538 | ||
539 | /* | |
540 | * ======== proc_detach ======== | |
541 | * Purpose: | |
542 | * Destroys the Processor Object. Removes the notification from the Dev | |
543 | * List. | |
544 | */ | |
545 | int proc_detach(struct process_context *pr_ctxt) | |
546 | { | |
547 | int status = 0; | |
548 | struct proc_object *p_proc_object = NULL; | |
549 | ||
550 | DBC_REQUIRE(refs > 0); | |
551 | ||
552 | p_proc_object = (struct proc_object *)pr_ctxt->hprocessor; | |
553 | ||
554 | if (p_proc_object) { | |
555 | /* Notify the Client */ | |
556 | ntfy_notify(p_proc_object->ntfy_obj, DSP_PROCESSORDETACH); | |
557 | /* Remove the notification memory */ | |
558 | if (p_proc_object->ntfy_obj) { | |
559 | ntfy_delete(p_proc_object->ntfy_obj); | |
560 | kfree(p_proc_object->ntfy_obj); | |
561 | } | |
562 | ||
563 | kfree(p_proc_object->psz_last_coff); | |
564 | p_proc_object->psz_last_coff = NULL; | |
565 | /* Remove the Proc from the DEV List */ | |
566 | (void)dev_remove_proc_object(p_proc_object->hdev_obj, | |
567 | (u32) p_proc_object); | |
568 | /* Free the Processor Object */ | |
569 | kfree(p_proc_object); | |
570 | pr_ctxt->hprocessor = NULL; | |
571 | } else { | |
572 | status = -EFAULT; | |
573 | } | |
574 | ||
575 | return status; | |
576 | } | |
577 | ||
578 | /* | |
579 | * ======== proc_enum_nodes ======== | |
580 | * Purpose: | |
581 | * Enumerate and get configuration information about nodes allocated | |
582 | * on a DSP processor. | |
583 | */ | |
584 | int proc_enum_nodes(void *hprocessor, void **node_tab, | |
585 | IN u32 node_tab_size, OUT u32 *pu_num_nodes, | |
586 | OUT u32 *pu_allocated) | |
587 | { | |
588 | int status = -EPERM; | |
589 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | |
590 | struct node_mgr *hnode_mgr = NULL; | |
591 | ||
592 | DBC_REQUIRE(refs > 0); | |
593 | DBC_REQUIRE(node_tab != NULL || node_tab_size == 0); | |
594 | DBC_REQUIRE(pu_num_nodes != NULL); | |
595 | DBC_REQUIRE(pu_allocated != NULL); | |
596 | ||
597 | if (p_proc_object) { | |
598 | if (DSP_SUCCEEDED(dev_get_node_manager(p_proc_object->hdev_obj, | |
599 | &hnode_mgr))) { | |
600 | if (hnode_mgr) { | |
601 | status = node_enum_nodes(hnode_mgr, node_tab, | |
602 | node_tab_size, | |
603 | pu_num_nodes, | |
604 | pu_allocated); | |
605 | } | |
606 | } | |
607 | } else { | |
608 | status = -EFAULT; | |
609 | } | |
610 | ||
611 | return status; | |
612 | } | |
613 | ||
/* Cache operation against kernel address instead of users */
/*
 * Populate map_obj->dma_info.sg with one scatterlist entry per page
 * covering [start, start + len), beginning at cached page index pg_i.
 * The sg table must already be allocated and sized (num_pages entries).
 * Returns 0 on success, -EINVAL/-EFAULT on a missing or bad page, or
 * when the walk does not consume exactly num_pages entries.
 */
static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start,
						ssize_t len, int pg_i)
{
	struct page *page;
	unsigned long offset;
	ssize_t rest;
	int ret = 0, i = 0;
	struct scatterlist *sg = map_obj->dma_info.sg;

	while (len) {
		page = get_mapping_page(map_obj, pg_i);
		if (!page) {
			pr_err("%s: no page for %08lx\n", __func__, start);
			ret = -EINVAL;
			goto out;
		} else if (IS_ERR(page)) {
			pr_err("%s: err page for %08lx(%lu)\n", __func__, start,
			       PTR_ERR(page));
			ret = PTR_ERR(page);
			goto out;
		}

		/* first iteration may start mid-page; later ones are aligned */
		offset = start & ~PAGE_MASK;
		rest = min_t(ssize_t, PAGE_SIZE - offset, len);

		sg_set_page(&sg[i], page, rest, offset);

		len -= rest;
		start += rest;
		pg_i++, i++;
	}

	/* sanity: entries consumed must equal the precomputed page count */
	if (i != map_obj->dma_info.num_pages) {
		pr_err("%s: bad number of sg iterations\n", __func__);
		ret = -EFAULT;
		goto out;
	}

out:
	return ret;
}
656 | ||
/*
 * End a DMA transfer: unmap the scatterlist previously set up by
 * memory_give_ownership and free it, returning the buffer to CPU
 * ownership.  A missing sg table is treated as a no-op (returns 0);
 * mismatched direction or page count returns -EINVAL.
 */
static int memory_regain_ownership(struct dmm_map_object *map_obj,
		unsigned long start, ssize_t len, enum dma_data_direction dir)
{
	int ret = 0;
	unsigned long first_data_page = start >> PAGE_SHIFT;
	unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
	/* calculating the number of pages this area spans */
	unsigned long num_pages = last_data_page - first_data_page + 1;
	struct bridge_dma_map_info *dma_info = &map_obj->dma_info;

	if (!dma_info->sg)
		goto out;

	/* refuse to unmap with parameters that don't match the mapping */
	if (dma_info->dir != dir || dma_info->num_pages != num_pages) {
		pr_err("%s: dma info doesn't match given params\n", __func__);
		return -EINVAL;
	}

	dma_unmap_sg(bridge, dma_info->sg, num_pages, dma_info->dir);

	pr_debug("%s: dma_map_sg unmapped\n", __func__);

	kfree(dma_info->sg);

	map_obj->dma_info.sg = NULL;

out:
	return ret;
}
686 | ||
/* Cache operation against kernel address instead of users */
/*
 * Begin a DMA transfer: build a scatterlist over the cached pages
 * backing [start, start + len) and hand the buffer to the device with
 * dma_map_sg.  The sg table and bookkeeping are stored in
 * map_obj->dma_info for the matching memory_regain_ownership call.
 * Returns 0 on success or a negative errno.
 */
static int memory_give_ownership(struct dmm_map_object *map_obj,
		unsigned long start, ssize_t len, enum dma_data_direction dir)
{
	int pg_i, ret, sg_num;
	struct scatterlist *sg;
	unsigned long first_data_page = start >> PAGE_SHIFT;
	unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
	/* calculating the number of pages this area spans */
	unsigned long num_pages = last_data_page - first_data_page + 1;

	pg_i = find_first_page_in_cache(map_obj, start);
	if (pg_i < 0) {
		pr_err("%s: failed to find first page in cache\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		pr_err("%s: kcalloc failed\n", __func__);
		ret = -ENOMEM;
		goto out;
	}

	sg_init_table(sg, num_pages);

	/* cleanup a previous sg allocation */
	/* this may happen if application doesn't signal for e/o DMA */
	kfree(map_obj->dma_info.sg);

	map_obj->dma_info.sg = sg;
	map_obj->dma_info.dir = dir;
	map_obj->dma_info.num_pages = num_pages;

	ret = build_dma_sg(map_obj, start, len, pg_i);
	if (ret)
		goto kfree_sg;

	sg_num = dma_map_sg(bridge, sg, num_pages, dir);
	if (sg_num < 1) {
		pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num);
		ret = -EFAULT;
		goto kfree_sg;
	}

	pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num);
	map_obj->dma_info.sg_num = sg_num;

	return 0;

kfree_sg:
	kfree(sg);
	map_obj->dma_info.sg = NULL;
out:
	return ret;
}
744 | ||
/*
 * Hand a user-mapped buffer to the DSP for DMA in the given direction.
 * NOTE: hprocessor is treated here as a struct process_context *, not a
 * proc_object — the mapping cache lives on the process context.
 * Returns 0 on success, -EFAULT on a NULL context, unknown mapping, or
 * ownership-transfer failure.
 */
int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
				enum dma_data_direction dir)
{
	/* Keep STATUS here for future additions to this function */
	int status = 0;
	struct process_context *pr_ctxt = (struct process_context *) hprocessor;
	struct dmm_map_object *map_obj;

	DBC_REQUIRE(refs > 0);

	if (!pr_ctxt) {
		status = -EFAULT;
		goto err_out;
	}

	pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
							(u32)pmpu_addr,
							ul_size, dir);

	/* find requested memory are in cached mapping information */
	map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
	if (!map_obj) {
		pr_err("%s: find_containing_mapping failed\n", __func__);
		status = -EFAULT;
		goto err_out;
	}

	if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
		pr_err("%s: InValid address parameters %p %x\n",
		       __func__, pmpu_addr, ul_size);
		status = -EFAULT;
	}

err_out:

	return status;
}
782 | ||
/*
 * Reclaim a buffer from the DSP after DMA completes (counterpart to
 * proc_begin_dma).  hprocessor is again a struct process_context *.
 * Returns 0 on success, -EFAULT on a NULL context, unknown mapping, or
 * unmap failure.
 */
int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
			enum dma_data_direction dir)
{
	/* Keep STATUS here for future additions to this function */
	int status = 0;
	struct process_context *pr_ctxt = (struct process_context *) hprocessor;
	struct dmm_map_object *map_obj;

	DBC_REQUIRE(refs > 0);

	if (!pr_ctxt) {
		status = -EFAULT;
		goto err_out;
	}

	pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
							(u32)pmpu_addr,
							ul_size, dir);

	/* find requested memory are in cached mapping information */
	map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
	if (!map_obj) {
		pr_err("%s: find_containing_mapping failed\n", __func__);
		status = -EFAULT;
		goto err_out;
	}

	if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
		pr_err("%s: InValid address parameters %p %x\n",
		       __func__, pmpu_addr, ul_size);
		status = -EFAULT;
		goto err_out;
	}

err_out:
	return status;
}
820 | ||
821 | /* | |
822 | * ======== proc_flush_memory ======== | |
823 | * Purpose: | |
824 | * Flush cache | |
825 | */ | |
826 | int proc_flush_memory(void *hprocessor, void *pmpu_addr, | |
827 | u32 ul_size, u32 ul_flags) | |
828 | { | |
829 | enum dma_data_direction dir = DMA_BIDIRECTIONAL; | |
830 | ||
831 | return proc_begin_dma(hprocessor, pmpu_addr, ul_size, dir); | |
832 | } | |
833 | ||
834 | /* | |
835 | * ======== proc_invalidate_memory ======== | |
836 | * Purpose: | |
837 | * Invalidates the memory specified | |
838 | */ | |
839 | int proc_invalidate_memory(void *hprocessor, void *pmpu_addr, u32 size) | |
840 | { | |
841 | enum dma_data_direction dir = DMA_FROM_DEVICE; | |
842 | ||
843 | return proc_begin_dma(hprocessor, pmpu_addr, size, dir); | |
844 | } | |
845 | ||
846 | /* | |
847 | * ======== proc_get_resource_info ======== | |
848 | * Purpose: | |
849 | * Enumerate the resources currently available on a processor. | |
850 | */ | |
851 | int proc_get_resource_info(void *hprocessor, u32 resource_type, | |
852 | OUT struct dsp_resourceinfo *resource_info, | |
853 | u32 resource_info_size) | |
854 | { | |
855 | int status = -EPERM; | |
856 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | |
857 | struct node_mgr *hnode_mgr = NULL; | |
858 | struct nldr_object *nldr_obj = NULL; | |
859 | struct rmm_target_obj *rmm = NULL; | |
860 | struct io_mgr *hio_mgr = NULL; /* IO manager handle */ | |
861 | ||
862 | DBC_REQUIRE(refs > 0); | |
863 | DBC_REQUIRE(resource_info != NULL); | |
864 | DBC_REQUIRE(resource_info_size >= sizeof(struct dsp_resourceinfo)); | |
865 | ||
866 | if (!p_proc_object) { | |
867 | status = -EFAULT; | |
868 | goto func_end; | |
869 | } | |
870 | switch (resource_type) { | |
871 | case DSP_RESOURCE_DYNDARAM: | |
872 | case DSP_RESOURCE_DYNSARAM: | |
873 | case DSP_RESOURCE_DYNEXTERNAL: | |
874 | case DSP_RESOURCE_DYNSRAM: | |
875 | status = dev_get_node_manager(p_proc_object->hdev_obj, | |
876 | &hnode_mgr); | |
877 | if (!hnode_mgr) { | |
878 | status = -EFAULT; | |
879 | goto func_end; | |
880 | } | |
881 | ||
882 | status = node_get_nldr_obj(hnode_mgr, &nldr_obj); | |
883 | if (DSP_SUCCEEDED(status)) { | |
884 | status = nldr_get_rmm_manager(nldr_obj, &rmm); | |
885 | if (rmm) { | |
886 | if (!rmm_stat(rmm, | |
887 | (enum dsp_memtype)resource_type, | |
888 | (struct dsp_memstat *) | |
889 | &(resource_info->result. | |
890 | mem_stat))) | |
891 | status = -EINVAL; | |
892 | } else { | |
893 | status = -EFAULT; | |
894 | } | |
895 | } | |
896 | break; | |
897 | case DSP_RESOURCE_PROCLOAD: | |
898 | status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr); | |
899 | if (hio_mgr) | |
900 | status = | |
901 | p_proc_object->intf_fxns-> | |
902 | pfn_io_get_proc_load(hio_mgr, | |
903 | (struct dsp_procloadstat *) | |
904 | &(resource_info->result. | |
905 | proc_load_stat)); | |
906 | else | |
907 | status = -EFAULT; | |
908 | break; | |
909 | default: | |
910 | status = -EPERM; | |
911 | break; | |
912 | } | |
913 | func_end: | |
914 | return status; | |
915 | } | |
916 | ||
917 | /* | |
918 | * ======== proc_exit ======== | |
919 | * Purpose: | |
920 | * Decrement reference count, and free resources when reference count is | |
921 | * 0. | |
922 | */ | |
923 | void proc_exit(void) | |
924 | { | |
925 | DBC_REQUIRE(refs > 0); | |
926 | ||
927 | refs--; | |
928 | ||
929 | DBC_ENSURE(refs >= 0); | |
930 | } | |
931 | ||
932 | /* | |
933 | * ======== proc_get_dev_object ======== | |
934 | * Purpose: | |
935 | * Return the Dev Object handle for a given Processor. | |
936 | * | |
937 | */ | |
938 | int proc_get_dev_object(void *hprocessor, | |
939 | struct dev_object **phDevObject) | |
940 | { | |
941 | int status = -EPERM; | |
942 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | |
943 | ||
944 | DBC_REQUIRE(refs > 0); | |
945 | DBC_REQUIRE(phDevObject != NULL); | |
946 | ||
947 | if (p_proc_object) { | |
948 | *phDevObject = p_proc_object->hdev_obj; | |
949 | status = 0; | |
950 | } else { | |
951 | *phDevObject = NULL; | |
952 | status = -EFAULT; | |
953 | } | |
954 | ||
955 | DBC_ENSURE((DSP_SUCCEEDED(status) && *phDevObject != NULL) || | |
956 | (DSP_FAILED(status) && *phDevObject == NULL)); | |
957 | ||
958 | return status; | |
959 | } | |
960 | ||
961 | /* | |
962 | * ======== proc_get_state ======== | |
963 | * Purpose: | |
964 | * Report the state of the specified DSP processor. | |
965 | */ | |
966 | int proc_get_state(void *hprocessor, | |
967 | OUT struct dsp_processorstate *proc_state_obj, | |
968 | u32 state_info_size) | |
969 | { | |
970 | int status = 0; | |
971 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | |
972 | int brd_status; | |
973 | struct deh_mgr *hdeh_mgr; | |
974 | ||
975 | DBC_REQUIRE(refs > 0); | |
976 | DBC_REQUIRE(proc_state_obj != NULL); | |
977 | DBC_REQUIRE(state_info_size >= sizeof(struct dsp_processorstate)); | |
978 | ||
979 | if (p_proc_object) { | |
980 | /* First, retrieve BRD state information */ | |
981 | status = (*p_proc_object->intf_fxns->pfn_brd_status) | |
982 | (p_proc_object->hbridge_context, &brd_status); | |
983 | if (DSP_SUCCEEDED(status)) { | |
984 | switch (brd_status) { | |
985 | case BRD_STOPPED: | |
986 | proc_state_obj->proc_state = PROC_STOPPED; | |
987 | break; | |
988 | case BRD_SLEEP_TRANSITION: | |
989 | case BRD_DSP_HIBERNATION: | |
990 | /* Fall through */ | |
991 | case BRD_RUNNING: | |
992 | proc_state_obj->proc_state = PROC_RUNNING; | |
993 | break; | |
994 | case BRD_LOADED: | |
995 | proc_state_obj->proc_state = PROC_LOADED; | |
996 | break; | |
997 | case BRD_ERROR: | |
998 | proc_state_obj->proc_state = PROC_ERROR; | |
999 | break; | |
1000 | default: | |
1001 | proc_state_obj->proc_state = 0xFF; | |
1002 | status = -EPERM; | |
1003 | break; | |
1004 | } | |
1005 | } | |
1006 | /* Next, retrieve error information, if any */ | |
1007 | status = dev_get_deh_mgr(p_proc_object->hdev_obj, &hdeh_mgr); | |
1008 | if (DSP_SUCCEEDED(status) && hdeh_mgr) | |
1009 | status = (*p_proc_object->intf_fxns->pfn_deh_get_info) | |
1010 | (hdeh_mgr, &(proc_state_obj->err_info)); | |
1011 | } else { | |
1012 | status = -EFAULT; | |
1013 | } | |
1014 | dev_dbg(bridge, "%s, results: status: 0x%x proc_state_obj: 0x%x\n", | |
1015 | __func__, status, proc_state_obj->proc_state); | |
1016 | return status; | |
1017 | } | |
1018 | ||
1019 | /* | |
1020 | * ======== proc_get_trace ======== | |
1021 | * Purpose: | |
1022 | * Retrieve the current contents of the trace buffer, located on the | |
1023 | * Processor. Predefined symbols for the trace buffer must have been | |
1024 | * configured into the DSP executable. | |
1025 | * Details: | |
1026 | * We support using the symbols SYS_PUTCBEG and SYS_PUTCEND to define a | |
1027 | * trace buffer, only. Treat it as an undocumented feature. | |
1028 | * This call is destructive, meaning the processor is placed in the monitor | |
1029 | * state as a result of this function. | |
1030 | */ | |
1031 | int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size) | |
1032 | { | |
1033 | int status; | |
1034 | status = -ENOSYS; | |
1035 | return status; | |
1036 | } | |
1037 | ||
1038 | /* | |
1039 | * ======== proc_init ======== | |
1040 | * Purpose: | |
1041 | * Initialize PROC's private state, keeping a reference count on each call | |
1042 | */ | |
1043 | bool proc_init(void) | |
1044 | { | |
1045 | bool ret = true; | |
1046 | ||
1047 | DBC_REQUIRE(refs >= 0); | |
1048 | ||
1049 | if (ret) | |
1050 | refs++; | |
1051 | ||
1052 | DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0))); | |
1053 | ||
1054 | return ret; | |
1055 | } | |
1056 | ||
/*
 *  ======== proc_load ========
 *  Purpose:
 *      Reset a processor and load a new base program image.
 *      This will be an OEM-only function, and not part of the DSP/BIOS Bridge
 *      application developer's API.
 *
 *  Sequence (each phase runs only if the previous one succeeded):
 *      1. Stop the board, then place it in the monitor state.
 *      2. Build a new envp[] with "PROC_ID=<n>" prepended and open the
 *         COFF base image (unregistering any previously loaded COFF).
 *      3. Auto-register the DCD nodes from the new COFF.
 *      4. Ensure a message manager exists, notify the IO manager of the
 *         pending load, then load the executable via the COD manager.
 *      5. Mark the board/processor LOADED, notify clients, and (on the DSP
 *         unit) rebuild the DMM tables after the EXT_END symbol.
 */
int proc_load(void *hprocessor, IN CONST s32 argc_index,
	      IN CONST char **user_args, IN CONST char **user_envp)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct io_mgr *hio_mgr;	/* IO manager handle */
	struct msg_mgr *hmsg_mgr;
	struct cod_manager *cod_mgr;	/* Code manager handle */
	char *pargv0;		/* temp argv[0] ptr */
	char **new_envp;	/* Updated envp[] array. */
	char sz_proc_id[MAXPROCIDLEN];	/* Size of "PROC_ID=<n>" */
	s32 envp_elems;		/* Num elements in envp[]. */
	s32 cnew_envp;		/* "  " in new_envp[] */
	s32 nproc_id = 0;	/* Anticipate MP version. */
	struct dcd_manager *hdcd_handle;
	struct dmm_object *dmm_mgr;
	u32 dw_ext_end;
	u32 proc_id;
	int brd_state;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	/* Wall-clock timestamps bracketing the whole load, for debugging. */
	struct timeval tv1;
	struct timeval tv2;
#endif

#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;
#endif

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(argc_index > 0);
	DBC_REQUIRE(user_args != NULL);

#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	do_gettimeofday(&tv1);
#endif
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr);
	if (!cod_mgr) {
		status = -EPERM;
		goto func_end;
	}
	status = proc_stop(hprocessor);
	if (DSP_FAILED(status))
		goto func_end;

	/* Place the board in the monitor state. */
	status = proc_monitor(hprocessor);
	if (DSP_FAILED(status))
		goto func_end;

	/* Save ptr to original argv[0]. */
	pargv0 = (char *)user_args[0];
	/*Prepend "PROC_ID=<nproc_id>"to envp array for target. */
	envp_elems = get_envp_count((char **)user_envp);
	/* +1 slot for PROC_ID, +1 more for the terminating NULL when the
	 * incoming envp was empty (count of 0 means no NULL was counted). */
	cnew_envp = (envp_elems ? (envp_elems + 1) : (envp_elems + 2));
	new_envp = kzalloc(cnew_envp * sizeof(char **), GFP_KERNEL);
	if (new_envp) {
		status = snprintf(sz_proc_id, MAXPROCIDLEN, PROC_ENVPROCID,
				  nproc_id);
		/* NOTE(review): the kernel's snprintf() never returns -1;
		 * truncation is reported as a return >= MAXPROCIDLEN, so
		 * this overflow check looks ineffective — confirm. */
		if (status == -1) {
			dev_dbg(bridge, "%s: Proc ID string overflow\n",
				__func__);
			status = -EPERM;
		} else {
			new_envp =
			    prepend_envp(new_envp, (char **)user_envp,
					 envp_elems, cnew_envp, sz_proc_id);
			/* Get the DCD Handle */
			status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
						    (u32 *) &hdcd_handle);
			if (DSP_SUCCEEDED(status)) {
				/* Before proceeding with new load,
				 * check if a previously registered COFF
				 * exists.
				 * If yes, unregister nodes in previously
				 * registered COFF.  If any error occurred,
				 * set previously registered COFF to NULL. */
				if (p_proc_object->psz_last_coff != NULL) {
					status =
					    dcd_auto_unregister(hdcd_handle,
								p_proc_object->
								psz_last_coff);
					/* Regardless of auto unregister status,
					 * free previously allocated
					 * memory. */
					kfree(p_proc_object->psz_last_coff);
					p_proc_object->psz_last_coff = NULL;
				}
			}
			/* On success, do cod_open_base() */
			/* NOTE(review): this assignment overwrites any error
			 * from mgr_get_dcd_handle()/dcd_auto_unregister()
			 * above — confirm that is intentional. */
			status = cod_open_base(cod_mgr, (char *)user_args[0],
					       COD_SYMB);
		}
	} else {
		status = -ENOMEM;
	}
	if (DSP_SUCCEEDED(status)) {
		/* Auto-register data base */
		/* Get the DCD Handle */
		status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
					    (u32 *) &hdcd_handle);
		if (DSP_SUCCEEDED(status)) {
			/* Auto register nodes in specified COFF
			 * file.  If registration did not fail,
			 * (status = 0 or -EACCES)
			 * save the name of the COFF file for
			 * de-registration in the future. */
			status =
			    dcd_auto_register(hdcd_handle,
					      (char *)user_args[0]);
			/* -EACCES means "already registered" — not fatal. */
			if (status == -EACCES)
				status = 0;

			if (DSP_FAILED(status)) {
				status = -EPERM;
			} else {
				DBC_ASSERT(p_proc_object->psz_last_coff ==
					   NULL);
				/* Allocate memory for pszLastCoff */
				p_proc_object->psz_last_coff =
				    kzalloc((strlen(user_args[0]) +
					     1), GFP_KERNEL);
				/* If memory allocated, save COFF file name */
				if (p_proc_object->psz_last_coff) {
					strncpy(p_proc_object->psz_last_coff,
						(char *)user_args[0],
						(strlen((char *)user_args[0]) +
						 1));
				}
			}
		}
	}
	/* Update shared memory address and size */
	if (DSP_SUCCEEDED(status)) {
		/* Create the message manager. This must be done
		 * before calling the IOOnLoaded function. */
		dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
		if (!hmsg_mgr) {
			/* NOTE(review): a msg_create() failure is only
			 * asserted, not propagated — confirm. */
			status = msg_create(&hmsg_mgr, p_proc_object->hdev_obj,
					    (msg_onexit) node_on_exit);
			DBC_ASSERT(DSP_SUCCEEDED(status));
			dev_set_msg_mgr(p_proc_object->hdev_obj, hmsg_mgr);
		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Set the Device object's message manager */
		status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr);
		if (hio_mgr)
			status = (*p_proc_object->intf_fxns->pfn_io_on_loaded)
			    (hio_mgr);
		else
			status = -EFAULT;
	}
	if (DSP_SUCCEEDED(status)) {
		/* Now, attempt to load an exec: */

		/* Boost the OPP level to Maximum level supported by baseport */
#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP5]);
#endif
		status = cod_load_base(cod_mgr, argc_index, (char **)user_args,
				       dev_brd_write_fxn,
				       p_proc_object->hdev_obj, NULL);
		if (DSP_FAILED(status)) {
			if (status == -EBADF) {
				dev_dbg(bridge, "%s: Failure to Load the EXE\n",
					__func__);
			}
			if (status == -ESPIPE) {
				pr_err("%s: Couldn't parse the file\n",
				       __func__);
			}
		}
		/* Requesting the lowest opp supported */
#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif

	}
	if (DSP_SUCCEEDED(status)) {
		/* Update the Processor status to loaded */
		status = (*p_proc_object->intf_fxns->pfn_brd_set_state)
		    (p_proc_object->hbridge_context, BRD_LOADED);
		if (DSP_SUCCEEDED(status)) {
			p_proc_object->proc_state = PROC_LOADED;
			if (p_proc_object->ntfy_obj)
				proc_notify_clients(p_proc_object,
						    DSP_PROCESSORSTATECHANGE);
		}
	}
	if (DSP_SUCCEEDED(status)) {
		status = proc_get_processor_id(hprocessor, &proc_id);
		if (proc_id == DSP_UNIT) {
			/* Use all available DSP address space after EXTMEM
			 * for DMM */
			if (DSP_SUCCEEDED(status))
				status = cod_get_sym_value(cod_mgr, EXTEND,
							   &dw_ext_end);

			/* Reset DMM structs and add an initial free chunk */
			if (DSP_SUCCEEDED(status)) {
				status =
				    dev_get_dmm_mgr(p_proc_object->hdev_obj,
						    &dmm_mgr);
				if (dmm_mgr) {
					/* Set dw_ext_end to DMM START u8
					 * address */
					dw_ext_end =
					    (dw_ext_end + 1) * DSPWORDSIZE;
					/* DMM memory is from EXT_END */
					status = dmm_create_tables(dmm_mgr,
								   dw_ext_end,
								   DMMPOOLSIZE);
				} else {
					status = -EFAULT;
				}
			}
		}
	}
	/* Restore the original argv[0] */
	kfree(new_envp);
	user_args[0] = pargv0;
	if (DSP_SUCCEEDED(status)) {
		if (DSP_SUCCEEDED((*p_proc_object->intf_fxns->pfn_brd_status)
				  (p_proc_object->hbridge_context, &brd_state))) {
			pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
			/* Remember the image path so it can be re-loaded on
			 * recovery; replace any previous name. */
			kfree(drv_datap->base_img);
			drv_datap->base_img = kmalloc(strlen(pargv0) + 1,
						      GFP_KERNEL);
			if (drv_datap->base_img)
				strncpy(drv_datap->base_img, pargv0,
					strlen(pargv0) + 1);
			else
				status = -ENOMEM;
			DBC_ASSERT(brd_state == BRD_LOADED);
		}
	}

func_end:
	if (DSP_FAILED(status))
		pr_err("%s: Processor failed to load\n", __func__);

	DBC_ENSURE((DSP_SUCCEEDED(status)
		    && p_proc_object->proc_state == PROC_LOADED)
		   || DSP_FAILED(status));
#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	do_gettimeofday(&tv2);
	/* Normalize the usec field before subtracting the timestamps. */
	if (tv2.tv_usec < tv1.tv_usec) {
		tv2.tv_usec += 1000000;
		tv2.tv_sec--;
	}
	dev_dbg(bridge, "%s: time to load %d sec and %d usec\n", __func__,
		tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec);
#endif
	return status;
}
1328 | ||
1329 | /* | |
1330 | * ======== proc_map ======== | |
1331 | * Purpose: | |
1332 | * Maps a MPU buffer to DSP address space. | |
1333 | */ | |
1334 | int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size, | |
1335 | void *req_addr, void **pp_map_addr, u32 ul_map_attr, | |
1336 | struct process_context *pr_ctxt) | |
1337 | { | |
1338 | u32 va_align; | |
1339 | u32 pa_align; | |
1340 | struct dmm_object *dmm_mgr; | |
1341 | u32 size_align; | |
1342 | int status = 0; | |
1343 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | |
1344 | struct dmm_map_object *map_obj; | |
1345 | u32 tmp_addr = 0; | |
1346 | ||
1347 | #ifdef CONFIG_BRIDGE_CACHE_LINE_CHECK | |
1348 | if ((ul_map_attr & BUFMODE_MASK) != RBUF) { | |
1349 | if (!IS_ALIGNED((u32)pmpu_addr, DSP_CACHE_LINE) || | |
1350 | !IS_ALIGNED(ul_size, DSP_CACHE_LINE)) { | |
1351 | pr_err("%s: not aligned: 0x%x (%d)\n", __func__, | |
1352 | (u32)pmpu_addr, ul_size); | |
1353 | return -EFAULT; | |
1354 | } | |
1355 | } | |
1356 | #endif | |
1357 | ||
1358 | /* Calculate the page-aligned PA, VA and size */ | |
1359 | va_align = PG_ALIGN_LOW((u32) req_addr, PG_SIZE4K); | |
1360 | pa_align = PG_ALIGN_LOW((u32) pmpu_addr, PG_SIZE4K); | |
1361 | size_align = PG_ALIGN_HIGH(ul_size + (u32) pmpu_addr - pa_align, | |
1362 | PG_SIZE4K); | |
1363 | ||
1364 | if (!p_proc_object) { | |
1365 | status = -EFAULT; | |
1366 | goto func_end; | |
1367 | } | |
1368 | /* Critical section */ | |
1369 | mutex_lock(&proc_lock); | |
1370 | dmm_get_handle(p_proc_object, &dmm_mgr); | |
1371 | if (dmm_mgr) | |
1372 | status = dmm_map_memory(dmm_mgr, va_align, size_align); | |
1373 | else | |
1374 | status = -EFAULT; | |
1375 | ||
1376 | /* Add mapping to the page tables. */ | |
1377 | if (DSP_SUCCEEDED(status)) { | |
1378 | ||
1379 | /* Mapped address = MSB of VA | LSB of PA */ | |
1380 | tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1))); | |
1381 | /* mapped memory resource tracking */ | |
1382 | map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr, | |
1383 | size_align); | |
1384 | if (!map_obj) | |
1385 | status = -ENOMEM; | |
1386 | else | |
1387 | status = (*p_proc_object->intf_fxns->pfn_brd_mem_map) | |
1388 | (p_proc_object->hbridge_context, pa_align, va_align, | |
1389 | size_align, ul_map_attr, map_obj->pages); | |
1390 | } | |
1391 | if (DSP_SUCCEEDED(status)) { | |
1392 | /* Mapped address = MSB of VA | LSB of PA */ | |
1393 | *pp_map_addr = (void *) tmp_addr; | |
1394 | } else { | |
1395 | remove_mapping_information(pr_ctxt, tmp_addr, size_align); | |
1396 | dmm_un_map_memory(dmm_mgr, va_align, &size_align); | |
1397 | } | |
1398 | mutex_unlock(&proc_lock); | |
1399 | ||
1400 | if (DSP_FAILED(status)) | |
1401 | goto func_end; | |
1402 | ||
1403 | func_end: | |
1404 | dev_dbg(bridge, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, " | |
1405 | "req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, " | |
1406 | "pa_align %x, size_align %x status 0x%x\n", __func__, | |
1407 | hprocessor, pmpu_addr, ul_size, req_addr, ul_map_attr, | |
1408 | pp_map_addr, va_align, pa_align, size_align, status); | |
1409 | ||
1410 | return status; | |
1411 | } | |
1412 | ||
/*
 *  ======== proc_register_notify ========
 *  Purpose:
 *      Register to be notified of specific processor events.
 *      Ordinary processor events go to the PROC notification list; error
 *      events (DSP_SYSERROR / DSP_MMUFAULT / DSP_PWRERROR /
 *      DSP_WDTOVERFLOW) are owned by the DEH manager and are registered
 *      there instead.
 */
int proc_register_notify(void *hprocessor, u32 event_mask,
			 u32 notify_type, struct dsp_notification
			 * hnotification)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct deh_mgr *hdeh_mgr;

	DBC_REQUIRE(hnotification != NULL);
	DBC_REQUIRE(refs > 0);

	/* Check processor handle */
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* Check if event mask is a valid processor related event */
	if (event_mask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH |
			   DSP_PROCESSORDETACH | DSP_PROCESSORRESTART |
			   DSP_MMUFAULT | DSP_SYSERROR | DSP_PWRERROR |
			   DSP_WDTOVERFLOW))
		status = -EINVAL;

	/* Check if notify type is valid */
	if (notify_type != DSP_SIGNALEVENT)
		status = -EINVAL;

	if (DSP_SUCCEEDED(status)) {
		/* If event mask is not DSP_SYSERROR, DSP_MMUFAULT,
		 * or DSP_PWRERROR then register event immediately. */
		if (event_mask &
		    ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR |
		      DSP_WDTOVERFLOW)) {
			status = ntfy_register(p_proc_object->ntfy_obj,
					       hnotification, event_mask,
					       notify_type);
			/* Special case alert, special case alert!
			 * If we're trying to *deregister* (i.e. event_mask
			 * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification,
			 * we have to deregister with the DEH manager.
			 * There's no way to know, based on event_mask which
			 * manager the notification event was registered with,
			 * so if we're trying to deregister and ntfy_register
			 * failed, we'll give the deh manager a shot.
			 */
			if ((event_mask == 0) && DSP_FAILED(status)) {
				status =
				    dev_get_deh_mgr(p_proc_object->hdev_obj,
						    &hdeh_mgr);
				DBC_ASSERT(p_proc_object->
					   intf_fxns->pfn_deh_register_notify);
				status =
				    (*p_proc_object->
				     intf_fxns->pfn_deh_register_notify)
				    (hdeh_mgr, event_mask, notify_type,
				     hnotification);
			}
		} else {
			/* Pure error-event mask: register directly with the
			 * DEH manager, which raises these notifications. */
			status = dev_get_deh_mgr(p_proc_object->hdev_obj,
						 &hdeh_mgr);
			DBC_ASSERT(p_proc_object->
				   intf_fxns->pfn_deh_register_notify);
			status =
			    (*p_proc_object->intf_fxns->pfn_deh_register_notify)
			    (hdeh_mgr, event_mask, notify_type, hnotification);

		}
	}
func_end:
	return status;
}
1489 | ||
1490 | /* | |
1491 | * ======== proc_reserve_memory ======== | |
1492 | * Purpose: | |
1493 | * Reserve a virtually contiguous region of DSP address space. | |
1494 | */ | |
1495 | int proc_reserve_memory(void *hprocessor, u32 ul_size, | |
1496 | void **pp_rsv_addr, | |
1497 | struct process_context *pr_ctxt) | |
1498 | { | |
1499 | struct dmm_object *dmm_mgr; | |
1500 | int status = 0; | |
1501 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | |
1502 | struct dmm_rsv_object *rsv_obj; | |
1503 | ||
1504 | if (!p_proc_object) { | |
1505 | status = -EFAULT; | |
1506 | goto func_end; | |
1507 | } | |
1508 | ||
1509 | status = dmm_get_handle(p_proc_object, &dmm_mgr); | |
1510 | if (!dmm_mgr) { | |
1511 | status = -EFAULT; | |
1512 | goto func_end; | |
1513 | } | |
1514 | ||
1515 | status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr); | |
1516 | if (status != 0) | |
1517 | goto func_end; | |
1518 | ||
1519 | /* | |
1520 | * A successful reserve should be followed by insertion of rsv_obj | |
1521 | * into dmm_rsv_list, so that reserved memory resource tracking | |
1522 | * remains uptodate | |
1523 | */ | |
1524 | rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL); | |
1525 | if (rsv_obj) { | |
1526 | rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr; | |
1527 | spin_lock(&pr_ctxt->dmm_rsv_lock); | |
1528 | list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list); | |
1529 | spin_unlock(&pr_ctxt->dmm_rsv_lock); | |
1530 | } | |
1531 | ||
1532 | func_end: | |
1533 | dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p " | |
1534 | "status 0x%x\n", __func__, hprocessor, | |
1535 | ul_size, pp_rsv_addr, status); | |
1536 | return status; | |
1537 | } | |
1538 | ||
/*
 *  ======== proc_start ========
 *  Purpose:
 *      Start a processor running.
 *      Requires a previously loaded image (PROC_LOADED); looks up the
 *      entry point via the COD manager, starts the board, then creates
 *      the level-2 device objects (node manager etc.).
 */
int proc_start(void *hprocessor)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct cod_manager *cod_mgr;	/* Code manager handle */
	u32 dw_dsp_addr;	/* Loaded code's entry point. */
	int brd_state;

	DBC_REQUIRE(refs > 0);
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* Call the bridge_brd_start */
	if (p_proc_object->proc_state != PROC_LOADED) {
		/* Nothing to run: an image must be loaded first. */
		status = -EBADR;
		goto func_end;
	}
	status = dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr);
	if (!cod_mgr) {
		status = -EFAULT;
		goto func_cont;
	}

	/* Entry point of the loaded base image. */
	status = cod_get_entry(cod_mgr, &dw_dsp_addr);
	if (DSP_FAILED(status))
		goto func_cont;

	status = (*p_proc_object->intf_fxns->pfn_brd_start)
	    (p_proc_object->hbridge_context, dw_dsp_addr);
	if (DSP_FAILED(status))
		goto func_cont;

	/* Call dev_create2 */
	status = dev_create2(p_proc_object->hdev_obj);
	if (DSP_SUCCEEDED(status)) {
		p_proc_object->proc_state = PROC_RUNNING;
		/* Deep sleep switces off the peripheral clocks.
		 * we just put the DSP CPU in idle in the idle loop.
		 * so there is no need to send a command to DSP */

		if (p_proc_object->ntfy_obj) {
			proc_notify_clients(p_proc_object,
					    DSP_PROCESSORSTATECHANGE);
		}
	} else {
		/* Failed to Create Node Manager and DISP Object
		 * Stop the Processor from running. Put it in STOPPED State */
		(void)(*p_proc_object->intf_fxns->
		       pfn_brd_stop) (p_proc_object->hbridge_context);
		p_proc_object->proc_state = PROC_STOPPED;
	}
func_cont:
	if (DSP_SUCCEEDED(status)) {
		if (DSP_SUCCEEDED((*p_proc_object->intf_fxns->pfn_brd_status)
				  (p_proc_object->hbridge_context, &brd_state))) {
			pr_info("%s: dsp in running state\n", __func__);
			DBC_ASSERT(brd_state != BRD_HIBERNATION);
		}
	} else {
		pr_err("%s: Failed to start the dsp\n", __func__);
	}

func_end:
	/* Contract: success implies the object is now in PROC_RUNNING
	 * (short-circuit avoids dereferencing a NULL handle on failure). */
	DBC_ENSURE((DSP_SUCCEEDED(status) && p_proc_object->proc_state ==
		    PROC_RUNNING) || DSP_FAILED(status));
	return status;
}
1612 | ||
/*
 *  ======== proc_stop ========
 *  Purpose:
 *      Stop a processor running.
 *      Refuses to stop while any nodes are still allocated; otherwise
 *      stops the board and tears down the node and message managers.
 */
int proc_stop(void *hprocessor)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct msg_mgr *hmsg_mgr;
	struct node_mgr *hnode_mgr;
	void *hnode;
	u32 node_tab_size = 1;
	u32 num_nodes = 0;
	u32 nodes_allocated = 0;
	int brd_state;

	DBC_REQUIRE(refs > 0);
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}

	/* check if there are any running nodes */
	status = dev_get_node_manager(p_proc_object->hdev_obj, &hnode_mgr);
	if (DSP_SUCCEEDED(status) && hnode_mgr) {
		/* A one-entry table is enough: we only need the count. */
		status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size,
					 &num_nodes, &nodes_allocated);
		if ((status == -EINVAL) || (nodes_allocated > 0)) {
			pr_err("%s: Can't stop device, active nodes = %d \n",
			       __func__, nodes_allocated);
			return -EBADR;
		}
	}
	/* Call the bridge_brd_stop */
	/* It is OK to stop a device that does n't have nodes OR not started */
	status =
	    (*p_proc_object->intf_fxns->
	     pfn_brd_stop) (p_proc_object->hbridge_context);
	if (DSP_SUCCEEDED(status)) {
		dev_dbg(bridge, "%s: processor in standby mode\n", __func__);
		p_proc_object->proc_state = PROC_STOPPED;
		/* Destroy the Node Manager, msg_ctrl Manager */
		if (DSP_SUCCEEDED(dev_destroy2(p_proc_object->hdev_obj))) {
			/* Destroy the msg_ctrl by calling msg_delete */
			dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
			if (hmsg_mgr) {
				msg_delete(hmsg_mgr);
				dev_set_msg_mgr(p_proc_object->hdev_obj, NULL);
			}
			/* Debug-only sanity check of the final board state. */
			if (DSP_SUCCEEDED
			    ((*p_proc_object->
			      intf_fxns->pfn_brd_status) (p_proc_object->
							  hbridge_context,
							  &brd_state)))
				DBC_ASSERT(brd_state == BRD_STOPPED);
		}
	} else {
		pr_err("%s: Failed to stop the processor\n", __func__);
	}
func_end:

	return status;
}
1676 | ||
1677 | /* | |
1678 | * ======== proc_un_map ======== | |
1679 | * Purpose: | |
1680 | * Removes a MPU buffer mapping from the DSP address space. | |
1681 | */ | |
1682 | int proc_un_map(void *hprocessor, void *map_addr, | |
1683 | struct process_context *pr_ctxt) | |
1684 | { | |
1685 | int status = 0; | |
1686 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | |
1687 | struct dmm_object *dmm_mgr; | |
1688 | u32 va_align; | |
1689 | u32 size_align; | |
1690 | ||
1691 | va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K); | |
1692 | if (!p_proc_object) { | |
1693 | status = -EFAULT; | |
1694 | goto func_end; | |
1695 | } | |
1696 | ||
1697 | status = dmm_get_handle(hprocessor, &dmm_mgr); | |
1698 | if (!dmm_mgr) { | |
1699 | status = -EFAULT; | |
1700 | goto func_end; | |
1701 | } | |
1702 | ||
1703 | /* Critical section */ | |
1704 | mutex_lock(&proc_lock); | |
1705 | /* | |
1706 | * Update DMM structures. Get the size to unmap. | |
1707 | * This function returns error if the VA is not mapped | |
1708 | */ | |
1709 | status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align); | |
1710 | /* Remove mapping from the page tables. */ | |
1711 | if (DSP_SUCCEEDED(status)) { | |
1712 | status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map) | |
1713 | (p_proc_object->hbridge_context, va_align, size_align); | |
1714 | } | |
1715 | ||
1716 | mutex_unlock(&proc_lock); | |
1717 | if (DSP_FAILED(status)) | |
1718 | goto func_end; | |
1719 | ||
1720 | /* | |
1721 | * A successful unmap should be followed by removal of map_obj | |
1722 | * from dmm_map_list, so that mapped memory resource tracking | |
1723 | * remains uptodate | |
1724 | */ | |
1725 | remove_mapping_information(pr_ctxt, (u32) map_addr, size_align); | |
1726 | ||
1727 | func_end: | |
1728 | dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", | |
1729 | __func__, hprocessor, map_addr, status); | |
1730 | return status; | |
1731 | } | |
1732 | ||
1733 | /* | |
1734 | * ======== proc_un_reserve_memory ======== | |
1735 | * Purpose: | |
1736 | * Frees a previously reserved region of DSP address space. | |
1737 | */ | |
1738 | int proc_un_reserve_memory(void *hprocessor, void *prsv_addr, | |
1739 | struct process_context *pr_ctxt) | |
1740 | { | |
1741 | struct dmm_object *dmm_mgr; | |
1742 | int status = 0; | |
1743 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | |
1744 | struct dmm_rsv_object *rsv_obj; | |
1745 | ||
1746 | if (!p_proc_object) { | |
1747 | status = -EFAULT; | |
1748 | goto func_end; | |
1749 | } | |
1750 | ||
1751 | status = dmm_get_handle(p_proc_object, &dmm_mgr); | |
1752 | if (!dmm_mgr) { | |
1753 | status = -EFAULT; | |
1754 | goto func_end; | |
1755 | } | |
1756 | ||
1757 | status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr); | |
1758 | if (status != 0) | |
1759 | goto func_end; | |
1760 | ||
1761 | /* | |
1762 | * A successful unreserve should be followed by removal of rsv_obj | |
1763 | * from dmm_rsv_list, so that reserved memory resource tracking | |
1764 | * remains uptodate | |
1765 | */ | |
1766 | spin_lock(&pr_ctxt->dmm_rsv_lock); | |
1767 | list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) { | |
1768 | if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) { | |
1769 | list_del(&rsv_obj->link); | |
1770 | kfree(rsv_obj); | |
1771 | break; | |
1772 | } | |
1773 | } | |
1774 | spin_unlock(&pr_ctxt->dmm_rsv_lock); | |
1775 | ||
1776 | func_end: | |
1777 | dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n", | |
1778 | __func__, hprocessor, prsv_addr, status); | |
1779 | return status; | |
1780 | } | |
1781 | ||
1782 | /* | |
1783 | * ======== = proc_monitor ======== == | |
1784 | * Purpose: | |
1785 | * Place the Processor in Monitor State. This is an internal | |
1786 | * function and a requirement before Processor is loaded. | |
1787 | * This does a bridge_brd_stop, dev_destroy2 and bridge_brd_monitor. | |
1788 | * In dev_destroy2 we delete the node manager. | |
1789 | * Parameters: | |
1790 | * p_proc_object: Pointer to Processor Object | |
1791 | * Returns: | |
1792 | * 0: Processor placed in monitor mode. | |
1793 | * !0: Failed to place processor in monitor mode. | |
1794 | * Requires: | |
1795 | * Valid Processor Handle | |
1796 | * Ensures: | |
1797 | * Success: ProcObject state is PROC_IDLE | |
1798 | */ | |
1799 | static int proc_monitor(struct proc_object *p_proc_object) | |
1800 | { | |
1801 | int status = -EPERM; | |
1802 | struct msg_mgr *hmsg_mgr; | |
1803 | int brd_state; | |
1804 | ||
1805 | DBC_REQUIRE(refs > 0); | |
1806 | DBC_REQUIRE(p_proc_object); | |
1807 | ||
1808 | /* This is needed only when Device is loaded when it is | |
1809 | * already 'ACTIVE' */ | |
1810 | /* Destory the Node Manager, msg_ctrl Manager */ | |
1811 | if (DSP_SUCCEEDED(dev_destroy2(p_proc_object->hdev_obj))) { | |
1812 | /* Destroy the msg_ctrl by calling msg_delete */ | |
1813 | dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr); | |
1814 | if (hmsg_mgr) { | |
1815 | msg_delete(hmsg_mgr); | |
1816 | dev_set_msg_mgr(p_proc_object->hdev_obj, NULL); | |
1817 | } | |
1818 | } | |
1819 | /* Place the Board in the Monitor State */ | |
1820 | if (DSP_SUCCEEDED((*p_proc_object->intf_fxns->pfn_brd_monitor) | |
1821 | (p_proc_object->hbridge_context))) { | |
1822 | status = 0; | |
1823 | if (DSP_SUCCEEDED((*p_proc_object->intf_fxns->pfn_brd_status) | |
1824 | (p_proc_object->hbridge_context, &brd_state))) | |
1825 | DBC_ASSERT(brd_state == BRD_IDLE); | |
1826 | } | |
1827 | ||
1828 | DBC_ENSURE((DSP_SUCCEEDED(status) && brd_state == BRD_IDLE) || | |
1829 | DSP_FAILED(status)); | |
1830 | return status; | |
1831 | } | |
1832 | ||
1833 | /* | |
1834 | * ======== get_envp_count ======== | |
1835 | * Purpose: | |
1836 | * Return the number of elements in the envp array, including the | |
1837 | * terminating NULL element. | |
1838 | */ | |
1839 | static s32 get_envp_count(char **envp) | |
1840 | { | |
1841 | s32 ret = 0; | |
1842 | if (envp) { | |
1843 | while (*envp++) | |
1844 | ret++; | |
1845 | ||
1846 | ret += 1; /* Include the terminating NULL in the count. */ | |
1847 | } | |
1848 | ||
1849 | return ret; | |
1850 | } | |
1851 | ||
1852 | /* | |
1853 | * ======== prepend_envp ======== | |
1854 | * Purpose: | |
1855 | * Prepend an environment variable=value pair to the new envp array, and | |
1856 | * copy in the existing var=value pairs in the old envp array. | |
1857 | */ | |
1858 | static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems, | |
1859 | s32 cnew_envp, char *szVar) | |
1860 | { | |
1861 | char **pp_envp = new_envp; | |
1862 | ||
1863 | DBC_REQUIRE(new_envp); | |
1864 | ||
1865 | /* Prepend new environ var=value string */ | |
1866 | *new_envp++ = szVar; | |
1867 | ||
1868 | /* Copy user's environment into our own. */ | |
1869 | while (envp_elems--) | |
1870 | *new_envp++ = *envp++; | |
1871 | ||
1872 | /* Ensure NULL terminates the new environment strings array. */ | |
1873 | if (envp_elems == 0) | |
1874 | *new_envp = NULL; | |
1875 | ||
1876 | return pp_envp; | |
1877 | } | |
1878 | ||
1879 | /* | |
1880 | * ======== proc_notify_clients ======== | |
1881 | * Purpose: | |
1882 | * Notify the processor the events. | |
1883 | */ | |
1884 | int proc_notify_clients(void *hProc, u32 uEvents) | |
1885 | { | |
1886 | int status = 0; | |
1887 | struct proc_object *p_proc_object = (struct proc_object *)hProc; | |
1888 | ||
1889 | DBC_REQUIRE(p_proc_object); | |
1890 | DBC_REQUIRE(IS_VALID_PROC_EVENT(uEvents)); | |
1891 | DBC_REQUIRE(refs > 0); | |
1892 | if (!p_proc_object) { | |
1893 | status = -EFAULT; | |
1894 | goto func_end; | |
1895 | } | |
1896 | ||
1897 | ntfy_notify(p_proc_object->ntfy_obj, uEvents); | |
1898 | func_end: | |
1899 | return status; | |
1900 | } | |
1901 | ||
1902 | /* | |
1903 | * ======== proc_notify_all_clients ======== | |
1904 | * Purpose: | |
1905 | * Notify the processor the events. This includes notifying all clients | |
 * attached to a particular DSP.
1907 | */ | |
1908 | int proc_notify_all_clients(void *hProc, u32 uEvents) | |
1909 | { | |
1910 | int status = 0; | |
1911 | struct proc_object *p_proc_object = (struct proc_object *)hProc; | |
1912 | ||
1913 | DBC_REQUIRE(IS_VALID_PROC_EVENT(uEvents)); | |
1914 | DBC_REQUIRE(refs > 0); | |
1915 | ||
1916 | if (!p_proc_object) { | |
1917 | status = -EFAULT; | |
1918 | goto func_end; | |
1919 | } | |
1920 | ||
1921 | dev_notify_clients(p_proc_object->hdev_obj, uEvents); | |
1922 | ||
1923 | func_end: | |
1924 | return status; | |
1925 | } | |
1926 | ||
1927 | /* | |
1928 | * ======== proc_get_processor_id ======== | |
1929 | * Purpose: | |
1930 | * Retrieves the processor ID. | |
1931 | */ | |
1932 | int proc_get_processor_id(void *hProc, u32 * procID) | |
1933 | { | |
1934 | int status = 0; | |
1935 | struct proc_object *p_proc_object = (struct proc_object *)hProc; | |
1936 | ||
1937 | if (p_proc_object) | |
1938 | *procID = p_proc_object->processor_id; | |
1939 | else | |
1940 | status = -EFAULT; | |
1941 | ||
1942 | return status; | |
1943 | } |