4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * DSP/BIOS Bridge dynamic + overlay Node loader.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
19 #include <linux/types.h>
21 #include <dspbridge/host_os.h>
23 #include <dspbridge/dbdefs.h>
25 #include <dspbridge/dbc.h>
27 /* Platform manager */
28 #include <dspbridge/cod.h>
29 #include <dspbridge/dev.h>
31 /* Resource manager */
32 #include <dspbridge/dbll.h>
33 #include <dspbridge/dbdcd.h>
34 #include <dspbridge/rmm.h>
35 #include <dspbridge/uuidutil.h>
37 #include <dspbridge/nldr.h>
39 /* Name of section containing dynamic load mem */
40 #define DYNMEMSECT ".dspbridge_mem"
42 /* Name of section containing dependent library information */
43 #define DEPLIBSECT ".dspbridge_deplibs"
/*
 * NOTE(review): this numbered listing has gaps. Constants referenced below
 * but defined on missing lines include MAXDEPTH, MAXLIBS, MAXMEMSEGS,
 * MAXFLAGS, DYNM_CODE and DYNM_DATA -- confirm against the full source.
 */
45 /* Max depth of recursion for loading node's dependent libraries */
48 /* Max number of persistent libraries kept by a node */
52 * Defines for extracting packed dynamic load memory requirements from two
54 * These defines must match node.cdb and dynm.cdb
55 * Format of data/code mask is:
56 * uuuuuuuu|fueeeeee|fudddddd|fucccccc|
59 * cccccc = preferred/required dynamic mem segid for create phase data/code
60 * dddddd = preferred/required dynamic mem segid for delete phase data/code
61 * eeeeee = prefered/req. dynamic mem segid for execute phase data/code
62 * f = flag indicating if memory is preferred or required:
63 * f = 1 if required, f = 0 if preferred.
65 * The 6 bits of the segid are interpreted as follows:
67 * If the 6th bit (bit 5) is not set, then this specifies a memory segment
68 * between 0 and 31 (a maximum of 32 dynamic loading memory segments).
69 * If the 6th bit (bit 5) is set, segid has the following interpretation:
70 * segid = 32 - Any internal memory segment can be used.
71 * segid = 33 - Any external memory segment can be used.
72 * segid = 63 - Any memory segment can be used (in this case the
73 * required/preferred flag is irrelevant).
76 /* Maximum allowed dynamic loading memory segments */
79 #define MAXSEGID 3 /* Largest possible (real) segid */
80 #define MEMINTERNALID 32 /* Segid meaning use internal mem */
81 #define MEMEXTERNALID 33 /* Segid meaning use external mem */
82 #define NULLID 63 /* Segid meaning no memory req/pref */
83 #define FLAGBIT 7 /* 7th bit is pref./req. flag */
84 #define SEGMASK 0x3f /* Bits 0 - 5 */
86 #define CREATEBIT 0 /* Create segid starts at bit 0 */
87 #define DELETEBIT 8 /* Delete segid starts at bit 8 */
88 #define EXECUTEBIT 16 /* Execute segid starts at bit 16 */
91 * Masks that define memory type. Must match defines in dynm.cdb.
95 #define DYNM_CODEDATA (DYNM_CODE | DYNM_DATA)
96 #define DYNM_INTERNAL 0x8
97 #define DYNM_EXTERNAL 0x10
100 * Defines for packing memory requirement/preference flags for code and
101 * data of each of the node's phases into one mask.
102 * The bit is set if the segid is required for loading code/data of the
103 * given phase. The bit is not set, if the segid is preferred only.
105 * These defines are also used as indices into a segid array for the node.
106 * eg node's segid[CREATEDATAFLAGBIT] is the memory segment id that the
107 * create phase data is required or preferred to be loaded into.
109 #define CREATEDATAFLAGBIT 0
110 #define CREATECODEFLAGBIT 1
111 #define EXECUTEDATAFLAGBIT 2
112 #define EXECUTECODEFLAGBIT 3
113 #define DELETEDATAFLAGBIT 4
114 #define DELETECODEFLAGBIT 5
/* True if segid maps to internal DSP memory (real segid tagged DYNM_INTERNAL,
 * or the special "any internal" id). */
117 #define IS_INTERNAL(nldr_obj, segid) (((segid) <= MAXSEGID && \
118 nldr_obj->seg_table[(segid)] & DYNM_INTERNAL) || \
119 (segid) == MEMINTERNALID)
121 #define IS_EXTERNAL(nldr_obj, segid) (((segid) <= MAXSEGID && \
122 nldr_obj->seg_table[(segid)] & DYNM_EXTERNAL) || \
123 (segid) == MEMEXTERNALID)
/* Byte-order swap helpers for 32-bit and 16-bit values. */
125 #define SWAPLONG(x) ((((x) << 24) & 0xFF000000) | (((x) << 8) & 0xFF0000L) | \
126 (((x) >> 8) & 0xFF00L) | (((x) >> 24) & 0xFF))
128 #define SWAPWORD(x) ((((x) << 8) & 0xFF00) | (((x) >> 8) & 0xFF))
131 * These names may be embedded in overlay sections to identify which
132 * node phase the section should be overlayed.
134 #define PCREATE "create"
135 #define PDELETE "delete"
136 #define PEXECUTE "execute"
/* Field-by-field UUID comparison; uc_data6 is a 6-byte array, hence strncmp. */
138 #define IS_EQUAL_UUID(uuid1, uuid2) (\
139 ((uuid1).ul_data1 == (uuid2).ul_data1) && \
140 ((uuid1).us_data2 == (uuid2).us_data2) && \
141 ((uuid1).us_data3 == (uuid2).us_data3) && \
142 ((uuid1).uc_data4 == (uuid2).uc_data4) && \
143 ((uuid1).uc_data5 == (uuid2).uc_data5) && \
144 (strncmp((void *)(uuid1).uc_data6, (void *)(uuid2).uc_data6, 6)) == 0)
147 * ======== mem_seg_info ========
148 * Format of dynamic loading memory segment info in coff file.
149 * Must match dynm.h55.
151 struct mem_seg_info {
152 u32 segid; /* Dynamic loading memory segment number */
/* NOTE(review): listing gap (original lines 153-154). Fields `base` and `len`
 * are declared there -- nldr_create reads (mem_info_obj + i)->base / ->len. */
155 u32 type; /* Mask of DYNM_CODE, DYNM_INTERNAL, etc. */
159 * ======== lib_node ========
160 * For maintaining a tree of library dependencies.
/* NOTE(review): the `struct lib_node {` opener (original line ~162) and the
 * closing brace are on lines missing from this excerpt. */
163 struct dbll_library_obj *lib; /* The library */
164 u16 dep_libs; /* Number of dependent libraries */
165 struct lib_node *dep_libs_tree; /* Dependent libraries of lib */
169 * ======== ovly_sect ========
170 * Information needed to overlay a section.
/* NOTE(review): the `struct ovly_sect {` opener and closing brace are on
 * missing lines. Entries form a singly linked list via next_sect. */
173 struct ovly_sect *next_sect;
174 u32 sect_load_addr; /* Load address of section */
175 u32 sect_run_addr; /* Run address of section */
176 u32 size; /* Size of section */
177 u16 page; /* DBL_CODE, DBL_DATA */
181 * ======== ovly_node ========
182 * For maintaining a list of overlay nodes, with sections that need to be
183 * overlayed for each of the nodes phases.
/* NOTE(review): struct opener/closer missing; also missing are the
 * `node_name` field and the per-phase section counters (create_sects,
 * delete_sects, execute_sects, other_sects) that add_ovly_info increments. */
186 struct dsp_uuid uuid;
188 struct ovly_sect *create_sects_list;
189 struct ovly_sect *delete_sects_list;
190 struct ovly_sect *execute_sects_list;
191 struct ovly_sect *other_sects_list;
203 * ======== nldr_object ========
204 * Overlay loader object.
/* NOTE(review): `struct nldr_object {` opener and closing brace are on
 * missing lines in this excerpt. */
207 struct dev_object *hdev_obj; /* Device object */
208 struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */
209 struct dbll_tar_obj *dbll; /* The DBL loader */
210 struct dbll_library_obj *base_lib; /* Base image library */
211 struct rmm_target_obj *rmm; /* Remote memory manager for DSP */
212 struct dbll_fxns ldr_fxns; /* Loader function table */
213 struct dbll_attrs ldr_attrs; /* attrs to pass to loader functions */
214 nldr_ovlyfxn ovly_fxn; /* "write" for overlay nodes */
215 nldr_writefxn write_fxn; /* "write" for dynamic nodes */
216 struct ovly_node *ovly_table; /* Table of overlay nodes */
217 u16 ovly_nodes; /* Number of overlay nodes in base */
218 u16 ovly_nid; /* Index for tracking overlay nodes */
219 u16 dload_segs; /* Number of dynamic load mem segs */
220 u32 *seg_table; /* memtypes of dynamic memory segs
221 * indexed by segid (comment continues on a missing line) */
223 u16 us_dsp_mau_size; /* Size of DSP MAU */
224 u16 us_dsp_word_size; /* Size of DSP word */
228 * ======== nldr_nodeobject ========
229 * Dynamic node object. This object is created when a node is allocated.
231 struct nldr_nodeobject {
232 struct nldr_object *nldr_obj; /* Dynamic loader handle */
233 void *priv_ref; /* Handle to pass to dbl_write_fxn */
234 struct dsp_uuid uuid; /* Node's UUID */
235 bool dynamic; /* Dynamically loaded node? */
236 bool overlay; /* Overlay node? */
237 bool *pf_phase_split; /* Multiple phase libraries? */
238 struct lib_node root; /* Library containing node phase */
239 struct lib_node create_lib; /* Library with create phase lib */
240 struct lib_node execute_lib; /* Library with execute phase lib */
241 struct lib_node delete_lib; /* Library with delete phase lib */
242 /* libs remain loaded until Delete */
243 struct lib_node pers_lib_table[MAXLIBS];
244 s32 pers_libs; /* Number of persistent libraries */
245 /* Path in lib dependency tree */
246 struct dbll_library_obj *lib_path[MAXDEPTH + 1];
247 enum nldr_phase phase; /* Node phase currently being loaded */
250 * Dynamic loading memory segments for data and code of each phase.
/* seg_id[] is indexed by the *FLAGBIT defines (CREATEDATAFLAGBIT, ...). */
252 u16 seg_id[MAXFLAGS];
255 * Mask indicating whether each mem segment specified in seg_id[]
256 * is preferred or required.
258 * if (code_data_flag_mask & (1 << EXECUTEDATAFLAGBIT)) != 0,
259 * then it is required to load execute phase data into the memory
260 * specified by seg_id[EXECUTEDATAFLAGBIT].
262 u32 code_data_flag_mask;
/* NOTE(review): closing `};` of the struct is on a missing line. */
265 /* Dynamic loader function table */
/* Indirection layer over the DBLL loader; copied into each nldr_object so
 * attrs/functions can be overridden per instance (see nldr_create). */
266 static struct dbll_fxns ldr_fxns = {
267 (dbll_close_fxn) dbll_close,
268 (dbll_create_fxn) dbll_create,
269 (dbll_delete_fxn) dbll_delete,
270 (dbll_exit_fxn) dbll_exit,
271 (dbll_get_attrs_fxn) dbll_get_attrs,
272 (dbll_get_addr_fxn) dbll_get_addr,
273 (dbll_get_c_addr_fxn) dbll_get_c_addr,
274 (dbll_get_sect_fxn) dbll_get_sect,
275 (dbll_init_fxn) dbll_init,
276 (dbll_load_fxn) dbll_load,
277 (dbll_load_sect_fxn) dbll_load_sect,
278 (dbll_open_fxn) dbll_open,
279 (dbll_read_sect_fxn) dbll_read_sect,
280 (dbll_set_attrs_fxn) dbll_set_attrs,
281 (dbll_unload_fxn) dbll_unload,
282 (dbll_unload_sect_fxn) dbll_unload_sect,
/* NOTE(review): closing `};` is on a missing line of this excerpt. */
285 static u32 refs; /* module reference count */
/* Forward declarations for file-local helpers. NOTE(review): several
 * parameter lines are missing from this excerpt (e.g. fake_ovly_write's
 * mem_space argument, remote_alloc's segment id, remote_free's trailing
 * args) -- confirm signatures against the full source. */
287 static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
288 u32 addr, u32 bytes);
289 static int add_ovly_node(struct dsp_uuid *uuid_obj,
290 enum dsp_dcdobjtype obj_type, void *handle);
291 static int add_ovly_sect(struct nldr_object *nldr_obj,
292 struct ovly_sect **lst,
293 struct dbll_sect_info *sect_inf,
294 bool *exists, u32 addr, u32 bytes);
295 static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
297 static void free_sects(struct nldr_object *nldr_obj,
298 struct ovly_sect *phase_sects, u16 alloc_num);
299 static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
300 char *sym_name, struct dbll_sym_val **sym);
301 static int load_lib(struct nldr_nodeobject *nldr_node_obj,
302 struct lib_node *root, struct dsp_uuid uuid,
304 struct dbll_library_obj **lib_path,
305 enum nldr_phase phase, u16 depth);
306 static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
307 enum nldr_phase phase);
308 static int remote_alloc(void **ref, u16 mem_sect, u32 size,
309 u32 align, u32 *dsp_address,
311 s32 req, bool reserve);
312 static int remote_free(void **ref, u16 space, u32 dsp_address, u32 size,
315 static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
316 struct lib_node *root);
317 static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
318 enum nldr_phase phase);
319 static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
320 struct dbll_library_obj *lib);
321 static u32 find_lcm(u32 a, u32 b);
322 static u32 find_gcf(u32 a, u32 b);
325 * ======== nldr_allocate ========
/*
 * Allocate and initialize an nldr_nodeobject for one node: records the
 * node's UUID, and for dynamically loaded nodes unpacks the per-phase
 * (create/execute/delete) code/data segment ids and required/preferred
 * flags from the packed ndb_props masks (format described by the
 * CREATEBIT/SEGMASK/FLAGBIT defines above).
 * NOTE(review): listing gap -- the opening brace, `status` declaration,
 * error assignment in the NULL check, and the final return are on lines
 * missing from this excerpt.
 */
327 int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
328 const struct dcd_nodeprops *node_props,
329 struct nldr_nodeobject **nldr_nodeobj,
330 bool *pf_phase_split)
332 struct nldr_nodeobject *nldr_node_obj = NULL;
335 DBC_REQUIRE(refs > 0);
336 DBC_REQUIRE(node_props != NULL);
337 DBC_REQUIRE(nldr_nodeobj != NULL);
338 DBC_REQUIRE(nldr_obj);
340 /* Initialize handle in case of failure */
341 *nldr_nodeobj = NULL;
342 /* Allocate node object */
343 nldr_node_obj = kzalloc(sizeof(struct nldr_nodeobject), GFP_KERNEL);
345 if (nldr_node_obj == NULL) {
/* NOTE(review): error path (presumably status = -ENOMEM) on missing lines. */
348 nldr_node_obj->pf_phase_split = pf_phase_split;
349 nldr_node_obj->pers_libs = 0;
350 nldr_node_obj->nldr_obj = nldr_obj;
351 nldr_node_obj->priv_ref = priv_ref;
352 /* Save node's UUID. */
353 nldr_node_obj->uuid = node_props->ndb_props.ui_node_id;
355 * Determine if node is a dynamically loaded node from
358 if (node_props->us_load_type == NLDR_DYNAMICLOAD) {
360 nldr_node_obj->dynamic = true;
362 * Extract memory requirements from ndb_props masks
/* For each phase, low 6 bits (SEGMASK) at the phase's bit offset give the
 * segid; the FLAGBIT above it says required (1) vs preferred (0). */
365 nldr_node_obj->seg_id[CREATEDATAFLAGBIT] = (u16)
366 (node_props->ul_data_mem_seg_mask >> CREATEBIT) &
368 nldr_node_obj->code_data_flag_mask |=
369 ((node_props->ul_data_mem_seg_mask >>
370 (CREATEBIT + FLAGBIT)) & 1) << CREATEDATAFLAGBIT;
371 nldr_node_obj->seg_id[CREATECODEFLAGBIT] = (u16)
372 (node_props->ul_code_mem_seg_mask >>
373 CREATEBIT) & SEGMASK;
374 nldr_node_obj->code_data_flag_mask |=
375 ((node_props->ul_code_mem_seg_mask >>
376 (CREATEBIT + FLAGBIT)) & 1) << CREATECODEFLAGBIT;
378 nldr_node_obj->seg_id[EXECUTEDATAFLAGBIT] = (u16)
379 (node_props->ul_data_mem_seg_mask >>
380 EXECUTEBIT) & SEGMASK;
381 nldr_node_obj->code_data_flag_mask |=
382 ((node_props->ul_data_mem_seg_mask >>
383 (EXECUTEBIT + FLAGBIT)) & 1) <<
385 nldr_node_obj->seg_id[EXECUTECODEFLAGBIT] = (u16)
386 (node_props->ul_code_mem_seg_mask >>
387 EXECUTEBIT) & SEGMASK;
388 nldr_node_obj->code_data_flag_mask |=
389 ((node_props->ul_code_mem_seg_mask >>
390 (EXECUTEBIT + FLAGBIT)) & 1) <<
393 nldr_node_obj->seg_id[DELETEDATAFLAGBIT] = (u16)
394 (node_props->ul_data_mem_seg_mask >> DELETEBIT) &
396 nldr_node_obj->code_data_flag_mask |=
397 ((node_props->ul_data_mem_seg_mask >>
398 (DELETEBIT + FLAGBIT)) & 1) << DELETEDATAFLAGBIT;
399 nldr_node_obj->seg_id[DELETECODEFLAGBIT] = (u16)
400 (node_props->ul_code_mem_seg_mask >>
401 DELETEBIT) & SEGMASK;
402 nldr_node_obj->code_data_flag_mask |=
403 ((node_props->ul_code_mem_seg_mask >>
404 (DELETEBIT + FLAGBIT)) & 1) << DELETECODEFLAGBIT;
406 /* Non-dynamically loaded nodes are part of the
408 nldr_node_obj->root.lib = nldr_obj->base_lib;
409 /* Check for overlay node */
410 if (node_props->us_load_type == NLDR_OVLYLOAD)
411 nldr_node_obj->overlay = true;
414 *nldr_nodeobj = (struct nldr_nodeobject *)nldr_node_obj;
416 /* Cleanup on failure */
417 if (DSP_FAILED(status) && nldr_node_obj)
418 kfree(nldr_node_obj);
420 DBC_ENSURE((DSP_SUCCEEDED(status) && *nldr_nodeobj)
421 || (DSP_FAILED(status) && *nldr_nodeobj == NULL));
426 * ======== nldr_create ========
/*
 * Create the node loader object for a device: wires up the DBLL loader and
 * base-image library from the COD manager, parses the base image's
 * DYNMEMSECT section into an RMM segment table, builds the overlay-node
 * table from the DCD database, and does a fake reload of the base image to
 * collect overlay section addresses.
 * NOTE(review): listing gap -- declarations of status, ul_entry, ul_addr,
 * ul_len, flags, i and dload_segs, several status checks, and the final
 * return are on lines missing from this excerpt.
 */
428 int nldr_create(struct nldr_object **nldr,
429 struct dev_object *hdev_obj,
430 const struct nldr_attrs *pattrs)
432 struct cod_manager *cod_mgr; /* COD manager */
433 char *psz_coff_buf = NULL;
434 char sz_zl_file[COD_MAXPATHLENGTH];
435 struct nldr_object *nldr_obj = NULL;
436 struct dbll_attrs save_attrs;
437 struct dbll_attrs new_attrs;
441 struct mem_seg_info *mem_info_obj;
444 struct rmm_segment *rmm_segs = NULL;
447 DBC_REQUIRE(refs > 0);
448 DBC_REQUIRE(nldr != NULL);
449 DBC_REQUIRE(hdev_obj != NULL);
450 DBC_REQUIRE(pattrs != NULL);
451 DBC_REQUIRE(pattrs->pfn_ovly != NULL);
452 DBC_REQUIRE(pattrs->pfn_write != NULL);
454 /* Allocate dynamic loader object */
455 nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL);
/* NOTE(review): NULL check of nldr_obj is on a missing line. */
457 nldr_obj->hdev_obj = hdev_obj;
458 /* warning, lazy status checking alert! */
459 dev_get_cod_mgr(hdev_obj, &cod_mgr);
461 status = cod_get_loader(cod_mgr, &nldr_obj->dbll);
462 DBC_ASSERT(DSP_SUCCEEDED(status));
463 status = cod_get_base_lib(cod_mgr, &nldr_obj->base_lib);
464 DBC_ASSERT(DSP_SUCCEEDED(status));
466 cod_get_base_name(cod_mgr, sz_zl_file,
468 DBC_ASSERT(DSP_SUCCEEDED(status));
471 /* end lazy status checking */
472 nldr_obj->us_dsp_mau_size = pattrs->us_dsp_mau_size;
473 nldr_obj->us_dsp_word_size = pattrs->us_dsp_word_size;
/* Start from the default DBLL table; attrs are specialized below. */
474 nldr_obj->ldr_fxns = ldr_fxns;
475 if (!(nldr_obj->ldr_fxns.init_fxn()))
481 /* Create the DCD Manager */
482 if (DSP_SUCCEEDED(status))
483 status = dcd_create_manager(NULL, &nldr_obj->hdcd_mgr);
485 /* Get dynamic loading memory sections from base lib */
486 if (DSP_SUCCEEDED(status)) {
488 nldr_obj->ldr_fxns.get_sect_fxn(nldr_obj->base_lib,
489 DYNMEMSECT, &ul_addr,
491 if (DSP_SUCCEEDED(status)) {
/* Buffer sized in host bytes: section length is in DSP MAUs. */
493 kzalloc(ul_len * nldr_obj->us_dsp_mau_size,
498 /* Ok to not have dynamic loading memory */
501 dev_dbg(bridge, "%s: failed - no dynamic loading mem "
502 "segments: 0x%x\n", __func__, status);
505 if (DSP_SUCCEEDED(status) && ul_len > 0) {
506 /* Read section containing dynamic load mem segments */
508 nldr_obj->ldr_fxns.read_sect_fxn(nldr_obj->base_lib,
509 DYNMEMSECT, psz_coff_buf,
512 if (DSP_SUCCEEDED(status) && ul_len > 0) {
513 /* Parse memory segment data */
/* First u32 of the section is the segment count. */
514 dload_segs = (u16) (*((u32 *) psz_coff_buf));
515 if (dload_segs > MAXMEMSEGS)
518 /* Parse dynamic load memory segments */
519 if (DSP_SUCCEEDED(status) && dload_segs > 0) {
520 rmm_segs = kzalloc(sizeof(struct rmm_segment) * dload_segs,
522 nldr_obj->seg_table =
523 kzalloc(sizeof(u32) * dload_segs, GFP_KERNEL);
524 if (rmm_segs == NULL || nldr_obj->seg_table == NULL) {
527 nldr_obj->dload_segs = dload_segs;
528 mem_info_obj = (struct mem_seg_info *)(psz_coff_buf +
530 for (i = 0; i < dload_segs; i++) {
531 rmm_segs[i].base = (mem_info_obj + i)->base;
532 rmm_segs[i].length = (mem_info_obj + i)->len;
533 rmm_segs[i].space = 0;
534 nldr_obj->seg_table[i] =
535 (mem_info_obj + i)->type;
537 "(proc) DLL MEMSEGMENT: %d, "
538 "Base: 0x%x, Length: 0x%x\n", i,
539 rmm_segs[i].base, rmm_segs[i].length);
543 /* Create Remote memory manager */
544 if (DSP_SUCCEEDED(status))
545 status = rmm_create(&nldr_obj->rmm, rmm_segs, dload_segs);
547 if (DSP_SUCCEEDED(status)) {
548 /* set the alloc, free, write functions for loader */
549 nldr_obj->ldr_fxns.get_attrs_fxn(nldr_obj->dbll, &save_attrs);
550 new_attrs = save_attrs;
551 new_attrs.alloc = (dbll_alloc_fxn) remote_alloc;
552 new_attrs.free = (dbll_free_fxn) remote_free;
553 new_attrs.sym_lookup = (dbll_sym_lookup) get_symbol_value;
554 new_attrs.sym_handle = nldr_obj;
555 new_attrs.write = (dbll_write_fxn) pattrs->pfn_write;
556 nldr_obj->ovly_fxn = pattrs->pfn_ovly;
557 nldr_obj->write_fxn = pattrs->pfn_write;
558 nldr_obj->ldr_attrs = new_attrs;
564 /* Get overlay nodes */
565 if (DSP_SUCCEEDED(status)) {
567 cod_get_base_name(cod_mgr, sz_zl_file, COD_MAXPATHLENGTH);
569 DBC_ASSERT(DSP_SUCCEEDED(status));
570 /* First count number of overlay nodes */
/* Two-pass scheme: add_ovly_node counts while ovly_table is NULL,
 * then fills the table on the second dcd_get_objects pass. */
572 dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
573 add_ovly_node, (void *)nldr_obj);
574 /* Now build table of overlay nodes */
575 if (DSP_SUCCEEDED(status) && nldr_obj->ovly_nodes > 0) {
576 /* Allocate table for overlay nodes */
577 nldr_obj->ovly_table =
578 kzalloc(sizeof(struct ovly_node) *
579 nldr_obj->ovly_nodes, GFP_KERNEL);
580 /* Put overlay nodes in the table */
581 nldr_obj->ovly_nid = 0;
582 status = dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
587 /* Do a fake reload of the base image to get overlay section info */
588 if (DSP_SUCCEEDED(status) && nldr_obj->ovly_nodes > 0) {
/* fake_ovly_write discards data; add_ovly_info logs section addresses. */
589 save_attrs.write = fake_ovly_write;
590 save_attrs.log_write = add_ovly_info;
591 save_attrs.log_write_handle = nldr_obj;
592 flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB;
593 status = nldr_obj->ldr_fxns.load_fxn(nldr_obj->base_lib, flags,
594 &save_attrs, &ul_entry);
596 if (DSP_SUCCEEDED(status)) {
597 *nldr = (struct nldr_object *)nldr_obj;
/* Failure path: tear down the partially built object. */
600 nldr_delete((struct nldr_object *)nldr_obj);
604 /* FIXME:Temp. Fix. Must be removed */
605 DBC_ENSURE((DSP_SUCCEEDED(status) && *nldr)
606 || (DSP_FAILED(status) && (*nldr == NULL)));
611 * ======== nldr_delete ========
/*
 * Destroy an nldr_object: shut down the loader, delete the RMM, free the
 * segment table, destroy the DCD manager, and free each overlay node's
 * four section lists plus the overlay table itself.
 * NOTE(review): listing gap -- the loop index declaration, the per-list
 * kfree of each ovly_section, the node_name free, and the final
 * kfree(nldr_obj) are on lines missing from this excerpt.
 */
613 void nldr_delete(struct nldr_object *nldr_obj)
615 struct ovly_sect *ovly_section;
616 struct ovly_sect *next;
618 DBC_REQUIRE(refs > 0);
619 DBC_REQUIRE(nldr_obj);
621 nldr_obj->ldr_fxns.exit_fxn();
623 rmm_delete(nldr_obj->rmm);
625 kfree(nldr_obj->seg_table);
627 if (nldr_obj->hdcd_mgr)
628 dcd_destroy_manager(nldr_obj->hdcd_mgr);
630 /* Free overlay node information */
631 if (nldr_obj->ovly_table) {
632 for (i = 0; i < nldr_obj->ovly_nodes; i++) {
/* Walk and free each phase's section list via next_sect. */
634 nldr_obj->ovly_table[i].create_sects_list;
635 while (ovly_section) {
636 next = ovly_section->next_sect;
641 nldr_obj->ovly_table[i].delete_sects_list;
642 while (ovly_section) {
643 next = ovly_section->next_sect;
648 nldr_obj->ovly_table[i].execute_sects_list;
649 while (ovly_section) {
650 next = ovly_section->next_sect;
654 ovly_section = nldr_obj->ovly_table[i].other_sects_list;
655 while (ovly_section) {
656 next = ovly_section->next_sect;
661 kfree(nldr_obj->ovly_table);
667 * ======== nldr_exit ========
668 * Discontinue usage of NLDR module.
/* NOTE(review): the `void nldr_exit(void)` signature, the decrement of the
 * `refs` module counter, and related cleanup are on missing lines. */
672 DBC_REQUIRE(refs > 0);
679 DBC_ENSURE(refs >= 0);
683 * ======== nldr_get_fxn_addr ========
/*
 * Resolve a node phase function's DSP address. Picks the library for the
 * node's current phase (create/execute/delete when phases are split,
 * otherwise the root library), then searches that library, its dependent
 * libraries, and finally the node's persistent libraries.
 * NOTE(review): listing gap -- the switch case labels, the `status1 =`
 * assignments before the get_*_addr_fxn calls, loop break conditions, the
 * not-found error path, and the return are on missing lines.
 */
685 int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
686 char *str_fxn, u32 * addr)
688 struct dbll_sym_val *dbll_sym;
689 struct nldr_object *nldr_obj;
691 bool status1 = false;
693 struct lib_node root = { NULL, 0, NULL };
694 DBC_REQUIRE(refs > 0);
695 DBC_REQUIRE(nldr_node_obj);
696 DBC_REQUIRE(addr != NULL);
697 DBC_REQUIRE(str_fxn != NULL);
699 nldr_obj = nldr_node_obj->nldr_obj;
700 /* Called from node_create(), node_delete(), or node_run(). */
701 if (nldr_node_obj->dynamic && *nldr_node_obj->pf_phase_split) {
702 switch (nldr_node_obj->phase) {
704 root = nldr_node_obj->create_lib;
707 root = nldr_node_obj->execute_lib;
710 root = nldr_node_obj->delete_lib;
717 /* for Overlay nodes or non-split Dynamic nodes */
718 root = nldr_node_obj->root;
/* Try C-mangled name first, then the raw symbol name. */
721 nldr_obj->ldr_fxns.get_c_addr_fxn(root.lib, str_fxn, &dbll_sym);
724 nldr_obj->ldr_fxns.get_addr_fxn(root.lib, str_fxn,
727 /* If symbol not found, check dependent libraries */
729 for (i = 0; i < root.dep_libs; i++) {
731 nldr_obj->ldr_fxns.get_addr_fxn(root.dep_libs_tree
737 get_c_addr_fxn(root.dep_libs_tree[i].lib,
746 /* Check persistent libraries */
748 for (i = 0; i < nldr_node_obj->pers_libs; i++) {
751 get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
756 get_c_addr_fxn(nldr_node_obj->pers_lib_table
757 [i].lib, str_fxn, &dbll_sym);
/* On success the resolved DSP address is returned through *addr. */
767 *addr = dbll_sym->value;
775 * ======== nldr_get_rmm_manager ========
776 * Given a NLDR object, retrieve RMM Manager Handle
/* NOTE(review): status declaration, the NULL-handle else branch (which
 * presumably clears *rmm_mgr and sets an error status), and the return are
 * on missing lines. */
778 int nldr_get_rmm_manager(struct nldr_object *nldr,
779 struct rmm_target_obj **rmm_mgr)
782 struct nldr_object *nldr_obj = nldr;
783 DBC_REQUIRE(rmm_mgr != NULL);
786 *rmm_mgr = nldr_obj->rmm;
792 DBC_ENSURE(DSP_SUCCEEDED(status) || ((rmm_mgr != NULL) &&
793 (*rmm_mgr == NULL)));
799 * ======== nldr_init ========
800 * Initialize the NLDR module.
/* NOTE(review): the function signature, the increment of `refs`, and the
 * return are on missing lines of this excerpt. */
804 DBC_REQUIRE(refs >= 0);
811 DBC_ENSURE(refs > 0);
816 * ======== nldr_load ========
/*
 * Load the libraries (or overlay sections) for one phase of a node.
 * Dynamic nodes: load the root library tree via load_lib(), then if the
 * node turned out to be phase-split, stash the loaded root into the
 * phase-specific lib_node. Overlay nodes: delegate to load_ovly().
 * NOTE(review): listing gap -- the status declaration, the switch over
 * `phase` selecting create/execute/delete_lib, and the return are on
 * missing lines.
 */
818 int nldr_load(struct nldr_nodeobject *nldr_node_obj,
819 enum nldr_phase phase)
821 struct nldr_object *nldr_obj;
822 struct dsp_uuid lib_uuid;
825 DBC_REQUIRE(refs > 0);
826 DBC_REQUIRE(nldr_node_obj);
828 nldr_obj = nldr_node_obj->nldr_obj;
830 if (nldr_node_obj->dynamic) {
831 nldr_node_obj->phase = phase;
833 lib_uuid = nldr_node_obj->uuid;
835 /* At this point, we may not know if node is split into
836 * different libraries. So we'll go ahead and load the
837 * library, and then save the pointer to the appropriate
838 * location after we know. */
841 load_lib(nldr_node_obj, &nldr_node_obj->root, lib_uuid,
842 false, nldr_node_obj->lib_path, phase, 0);
844 if (DSP_SUCCEEDED(status)) {
845 if (*nldr_node_obj->pf_phase_split) {
848 nldr_node_obj->create_lib =
853 nldr_node_obj->execute_lib =
858 nldr_node_obj->delete_lib =
869 if (nldr_node_obj->overlay)
870 status = load_ovly(nldr_node_obj, phase);
878 * ======== nldr_unload ========
/*
 * Unload the libraries (or overlay sections) loaded for one phase of a
 * node. For phase-split dynamic nodes, picks the phase's lib_node; on the
 * delete phase it also unloads all persistent libraries. Overlay nodes
 * delegate to unload_ovly().
 * NOTE(review): listing gap -- the status declaration, switch case labels,
 * loop header/body of the persistent-library loop, the root_lib->lib guard
 * before unload_lib, and the return are on missing lines.
 */
880 int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
881 enum nldr_phase phase)
884 struct lib_node *root_lib = NULL;
887 DBC_REQUIRE(refs > 0);
888 DBC_REQUIRE(nldr_node_obj);
890 if (nldr_node_obj != NULL) {
891 if (nldr_node_obj->dynamic) {
892 if (*nldr_node_obj->pf_phase_split) {
895 root_lib = &nldr_node_obj->create_lib;
898 root_lib = &nldr_node_obj->execute_lib;
901 root_lib = &nldr_node_obj->delete_lib;
902 /* Unload persistent libraries */
904 i < nldr_node_obj->pers_libs;
906 unload_lib(nldr_node_obj,
910 nldr_node_obj->pers_libs = 0;
917 /* Unload main library */
918 root_lib = &nldr_node_obj->root;
921 unload_lib(nldr_node_obj, root_lib);
923 if (nldr_node_obj->overlay)
924 unload_ovly(nldr_node_obj, phase);
932 * ======== add_ovly_info ========
/*
 * log_write callback used during the fake base-image reload: for each
 * overlay section (load address != run address), find the owning overlay
 * node by matching the section-name prefix against node names, classify
 * the section by the phase name embedded after the separator
 * (PCREATE/PDELETE/PEXECUTE, else "other"), and append it to that node's
 * phase section list via add_ovly_sect.
 * NOTE(review): listing gap -- the `u32 addr, u32 bytes` parameter line,
 * declarations of status/i/pch/node_name, the `seps` separator value
 * (presumably ':' given the "Skip over the ':'" comment), loop breaks and
 * the return are on missing lines. Also, "§_exists" below is mojibake for
 * "&sect_exists" (HTML entity &sect;) -- restore when re-encoding.
 */
934 static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
938 char *sect_name = (char *)sect_info->name;
939 bool sect_exists = false;
943 struct nldr_object *nldr_obj = (struct nldr_object *)handle;
946 /* Is this an overlay section (load address != run address)? */
947 if (sect_info->sect_load_addr == sect_info->sect_run_addr)
950 /* Find the node it belongs to */
951 for (i = 0; i < nldr_obj->ovly_nodes; i++) {
952 node_name = nldr_obj->ovly_table[i].node_name;
953 DBC_REQUIRE(node_name);
954 if (strncmp(node_name, sect_name + 1, strlen(node_name)) == 0) {
959 if (!(i < nldr_obj->ovly_nodes))
962 /* Determine which phase this section belongs to */
963 for (pch = sect_name + 1; *pch && *pch != seps; pch++)
967 pch++; /* Skip over the ':' */
968 if (strncmp(pch, PCREATE, strlen(PCREATE)) == 0) {
970 add_ovly_sect(nldr_obj,
972 ovly_table[i].create_sects_list,
973 sect_info, §_exists, addr, bytes);
974 if (DSP_SUCCEEDED(status) && !sect_exists)
975 nldr_obj->ovly_table[i].create_sects++;
977 } else if (strncmp(pch, PDELETE, strlen(PDELETE)) == 0) {
979 add_ovly_sect(nldr_obj,
981 ovly_table[i].delete_sects_list,
982 sect_info, §_exists, addr, bytes);
983 if (DSP_SUCCEEDED(status) && !sect_exists)
984 nldr_obj->ovly_table[i].delete_sects++;
986 } else if (strncmp(pch, PEXECUTE, strlen(PEXECUTE)) == 0) {
988 add_ovly_sect(nldr_obj,
990 ovly_table[i].execute_sects_list,
991 sect_info, §_exists, addr, bytes);
992 if (DSP_SUCCEEDED(status) && !sect_exists)
993 nldr_obj->ovly_table[i].execute_sects++;
996 /* Put in "other" sections */
998 add_ovly_sect(nldr_obj,
1000 ovly_table[i].other_sects_list,
1001 sect_info, §_exists, addr, bytes);
1002 if (DSP_SUCCEEDED(status) && !sect_exists)
1003 nldr_obj->ovly_table[i].other_sects++;
1012 * ======== add_ovly_node =========
1013 * Callback function passed to dcd_get_objects.
/*
 * Two-pass callback: while ovly_table is still NULL it only counts overlay
 * nodes (ovly_nodes++); once the table is allocated it records each
 * overlay node's UUID and a heap copy of its name at index ovly_nid.
 * Frees the strings that dcd_get_object_def allocated before returning.
 * NOTE(review): listing gap -- declarations of status/len/pbuf, the `else`
 * that selects the fill-table pass, the pbuf NULL check, the node_name
 * assignment after strncpy, and the return are on missing lines.
 */
1015 static int add_ovly_node(struct dsp_uuid *uuid_obj,
1016 enum dsp_dcdobjtype obj_type, void *handle)
1018 struct nldr_object *nldr_obj = (struct nldr_object *)handle;
1019 char *node_name = NULL;
1022 struct dcd_genericobj obj_def;
1025 if (obj_type != DSP_DCDNODETYPE)
1029 dcd_get_object_def(nldr_obj->hdcd_mgr, uuid_obj, obj_type,
1031 if (DSP_FAILED(status))
1034 /* If overlay node, add to the list */
1035 if (obj_def.obj_data.node_obj.us_load_type == NLDR_OVLYLOAD) {
1036 if (nldr_obj->ovly_table == NULL) {
1037 nldr_obj->ovly_nodes++;
1039 /* Add node to table */
1040 nldr_obj->ovly_table[nldr_obj->ovly_nid].uuid =
1042 DBC_REQUIRE(obj_def.obj_data.node_obj.ndb_props.
1045 strlen(obj_def.obj_data.node_obj.ndb_props.ac_name);
1046 node_name = obj_def.obj_data.node_obj.ndb_props.ac_name;
/* +1 for the NUL terminator; kzalloc guarantees termination. */
1047 pbuf = kzalloc(len + 1, GFP_KERNEL);
1051 strncpy(pbuf, node_name, len);
1052 nldr_obj->ovly_table[nldr_obj->ovly_nid].
1054 nldr_obj->ovly_nid++;
1058 /* These were allocated in dcd_get_object_def */
1059 kfree(obj_def.obj_data.node_obj.pstr_create_phase_fxn);
1061 kfree(obj_def.obj_data.node_obj.pstr_execute_phase_fxn);
1063 kfree(obj_def.obj_data.node_obj.pstr_delete_phase_fxn);
1065 kfree(obj_def.obj_data.node_obj.pstr_i_alg_name);
1072 * ======== add_ovly_sect ========
/*
 * Append one overlay section to a node's phase list, unless a section with
 * the same load address is already present (multiple write callbacks can
 * occur for one section). *exists reports the duplicate case; new entries
 * are linked at the tail of *lst.
 * NOTE(review): listing gap -- status declaration, setting *exists on the
 * duplicate path, the -ENOMEM assignment, the first-in-list branch
 * (`*lst = new_sect;`), and the return are on missing lines.
 */
1074 static int add_ovly_sect(struct nldr_object *nldr_obj,
1075 struct ovly_sect **lst,
1076 struct dbll_sect_info *sect_inf,
1077 bool *exists, u32 addr, u32 bytes)
1079 struct ovly_sect *new_sect = NULL;
1080 struct ovly_sect *last_sect;
1081 struct ovly_sect *ovly_section;
1084 ovly_section = last_sect = *lst;
1086 while (ovly_section) {
1088 * Make sure section has not already been added. Multiple
1089 * 'write' calls may be made to load the section.
1091 if (ovly_section->sect_load_addr == addr) {
1096 last_sect = ovly_section;
1097 ovly_section = ovly_section->next_sect;
1100 if (!ovly_section) {
1102 new_sect = kzalloc(sizeof(struct ovly_sect), GFP_KERNEL);
1103 if (new_sect == NULL) {
1106 new_sect->sect_load_addr = addr;
/* Run address is offset by how far into the section this write begins. */
1107 new_sect->sect_run_addr = sect_inf->sect_run_addr +
1108 (addr - sect_inf->sect_load_addr);
1109 new_sect->size = bytes;
1110 new_sect->page = sect_inf->type;
1113 /* Add to the list */
1114 if (DSP_SUCCEEDED(status)) {
1116 /* First in the list */
1119 last_sect->next_sect = new_sect;
1128 * ======== fake_ovly_write ========
/* No-op "write" used during the fake base-image reload in nldr_create so
 * section info can be collected (via the log_write callback) without
 * touching DSP memory. NOTE(review): body and trailing parameter are on
 * missing lines; presumably it just returns `bytes` -- confirm. */
1130 static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
1137 * ======== free_sects ========
/*
 * Release the DSP memory backing the first alloc_num sections of a phase
 * list via rmm_free (reserved memory, segid 0).
 * NOTE(review): listing gap -- the loop counter declaration/increment, the
 * ignored-return handling around rmm_free, and the closing brace are on
 * missing lines.
 */
1139 static void free_sects(struct nldr_object *nldr_obj,
1140 struct ovly_sect *phase_sects, u16 alloc_num)
1142 struct ovly_sect *ovly_section = phase_sects;
1146 while (ovly_section && i < alloc_num) {
1148 /* segid - page not supported yet */
1149 /* Reserved memory */
1151 rmm_free(nldr_obj->rmm, 0, ovly_section->sect_run_addr,
1152 ovly_section->size, true);
1154 ovly_section = ovly_section->next_sect;
1160 * ======== get_symbol_value ========
1161 * Find symbol in library's base image. If not there, check dependent
/* Symbol-lookup callback installed as ldr_attrs.sym_lookup. Search order:
 * base image, then the root library itself, then the root's immediate
 * dependent libraries (not their dependents), then the node's persistent
 * libraries. Each stage tries the raw name then the C-mangled name.
 * NOTE(review): listing gap -- the loop index declaration, several
 * `if (!status)` guards and break statements, and the final return of the
 * bool result are on missing lines. */
1164 static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
1165 char *sym_name, struct dbll_sym_val **sym)
1167 struct nldr_object *nldr_obj = (struct nldr_object *)handle;
1168 struct nldr_nodeobject *nldr_node_obj =
1169 (struct nldr_nodeobject *)rmm_handle;
1170 struct lib_node *root = (struct lib_node *)parg;
1172 bool status = false;
1174 /* check the base image */
1175 status = nldr_obj->ldr_fxns.get_addr_fxn(nldr_obj->base_lib,
1179 nldr_obj->ldr_fxns.get_c_addr_fxn(nldr_obj->base_lib,
1183 * Check in root lib itself. If the library consists of
1184 * multiple object files linked together, some symbols in the
1185 * library may need to be resolved.
1188 status = nldr_obj->ldr_fxns.get_addr_fxn(root->lib, sym_name,
1192 nldr_obj->ldr_fxns.get_c_addr_fxn(root->lib,
1198 * Check in root lib's dependent libraries, but not dependent
1199 * libraries' dependents.
1202 for (i = 0; i < root->dep_libs; i++) {
1204 nldr_obj->ldr_fxns.get_addr_fxn(root->
1211 get_c_addr_fxn(root->dep_libs_tree[i].lib,
1221 * Check in persistent libraries
1224 for (i = 0; i < nldr_node_obj->pers_libs; i++) {
1227 get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
1230 status = nldr_obj->ldr_fxns.get_c_addr_fxn
1231 (nldr_node_obj->pers_lib_table[i].lib,
1245 * ======== load_lib ========
1246 * Recursively load library and all its dependent libraries. The library
1247 * we're loading is specified by a uuid.
/*
 * ======== load_lib ========
 * Load a node's library and, recursively, all of its dependent
 * libraries for the given phase.  lib_path[] records the chain of
 * libraries currently being loaded so circular dependencies can be
 * detected; depth is the recursion level, bounded by MAXDEPTH.
 * Persistent dependent libraries of a non-persistent root are tracked
 * in the node's pers_lib_table (up to MAXLIBS) rather than in the
 * per-phase dependency tree.  On failure, any dependent libraries
 * already loaded are unloaded and the root library is closed.
 */
1249 static int load_lib(struct nldr_nodeobject *nldr_node_obj,
1250 struct lib_node *root, struct dsp_uuid uuid,
1252 struct dbll_library_obj **lib_path,
1253 enum nldr_phase phase, u16 depth)
1255 struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1256 u16 nd_libs = 0; /* Number of dependent libraries */
1257 u16 np_libs = 0; /* Number of persistent libraries */
1258 u16 nd_libs_loaded = 0; /* Number of dep. libraries loaded */
1261 u32 dw_buf_size = NLDR_MAXPATHLENGTH;
1262 dbll_flags flags = DBLL_SYMB | DBLL_CODE | DBLL_DATA | DBLL_DYNAMIC;
1263 struct dbll_attrs new_attrs;
1264 char *psz_file_name = NULL;
1265 struct dsp_uuid *dep_lib_uui_ds = NULL;
1266 bool *persistent_dep_libs = NULL;
1268 bool lib_status = false;
1269 struct lib_node *dep_lib;
/* Bound the recursion through the dependency tree */
1271 if (depth > MAXDEPTH) {
1276 /* Allocate a buffer for library file name of size DBL_MAXPATHLENGTH */
1277 psz_file_name = kzalloc(DBLL_MAXPATHLENGTH, GFP_KERNEL);
1278 if (psz_file_name == NULL)
1281 if (DSP_SUCCEEDED(status)) {
1282 /* Get the name of the library */
/* Root library: look up the file name for the requested phase */
1285 dcd_get_library_name(nldr_node_obj->nldr_obj->
1286 hdcd_mgr, &uuid, psz_file_name,
1287 &dw_buf_size, phase,
1288 nldr_node_obj->pf_phase_split);
1290 /* Dependent libraries are registered with a phase */
/* so they are looked up with NLDR_NOPHASE here */
1292 dcd_get_library_name(nldr_node_obj->nldr_obj->
1293 hdcd_mgr, &uuid, psz_file_name,
1294 &dw_buf_size, NLDR_NOPHASE,
1298 if (DSP_SUCCEEDED(status)) {
1299 /* Open the library, don't load symbols */
1301 nldr_obj->ldr_fxns.open_fxn(nldr_obj->dbll, psz_file_name,
1302 DBLL_NOLOAD, &root->lib);
1304 /* Done with file name */
1305 kfree(psz_file_name);
1307 /* Check to see if library not already loaded */
1308 if (DSP_SUCCEEDED(status) && root_prstnt) {
1310 find_in_persistent_lib_array(nldr_node_obj, root->lib);
/* Already loaded as persistent: drop the duplicate handle */
1313 nldr_obj->ldr_fxns.close_fxn(root->lib);
1317 if (DSP_SUCCEEDED(status)) {
1318 /* Check for circular dependencies. */
1319 for (i = 0; i < depth; i++) {
1320 if (root->lib == lib_path[i]) {
1321 /* This condition could be checked by a
1322 * tool at build time. */
1327 if (DSP_SUCCEEDED(status)) {
1328 /* Add library to current path in dependency tree */
1329 lib_path[depth] = root->lib;
1331 /* Get number of dependent libraries */
1333 dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->hdcd_mgr,
1334 &uuid, &nd_libs, &np_libs, phase);
1336 DBC_ASSERT(nd_libs >= np_libs);
1337 if (DSP_SUCCEEDED(status)) {
1338 if (!(*nldr_node_obj->pf_phase_split))
1341 /* nd_libs = #of dependent libraries */
1342 root->dep_libs = nd_libs - np_libs;
1344 dep_lib_uui_ds = kzalloc(sizeof(struct dsp_uuid) *
1345 nd_libs, GFP_KERNEL)_;
1346 persistent_dep_libs =
1347 kzalloc(sizeof(bool) * nd_libs, GFP_KERNEL);
1348 if (!dep_lib_uui_ds || !persistent_dep_libs)
1351 if (root->dep_libs > 0) {
1352 /* Allocate arrays for dependent lib UUIDs,
1354 root->dep_libs_tree = kzalloc
1355 (sizeof(struct lib_node) *
1356 (root->dep_libs), GFP_KERNEL);
1357 if (!(root->dep_libs_tree))
1362 if (DSP_SUCCEEDED(status)) {
1363 /* Get the dependent library UUIDs */
1365 dcd_get_dep_libs(nldr_node_obj->
1366 nldr_obj->hdcd_mgr, &uuid,
1367 nd_libs, dep_lib_uui_ds,
1368 persistent_dep_libs,
1375 * Recursively load dependent libraries.
1377 if (DSP_SUCCEEDED(status)) {
1378 for (i = 0; i < nd_libs; i++) {
1379 /* If root library is NOT persistent, and dep library
1380 * is, then record it. If root library IS persistent,
1381 * the deplib is already included */
1382 if (!root_prstnt && persistent_dep_libs[i] &&
1383 *nldr_node_obj->pf_phase_split) {
1384 if ((nldr_node_obj->pers_libs) >= MAXLIBS) {
1389 /* Allocate library outside of phase */
1391 &nldr_node_obj->pers_lib_table
1392 [nldr_node_obj->pers_libs];
1395 persistent_dep_libs[i] = true;
1397 /* Allocate library within phase */
1398 dep_lib = &root->dep_libs_tree[nd_libs_loaded];
/* Recurse one level deeper for this dependent library */
1401 status = load_lib(nldr_node_obj, dep_lib,
1403 persistent_dep_libs[i], lib_path,
1406 if (DSP_SUCCEEDED(status)) {
1407 if ((status != 0) &&
1408 !root_prstnt && persistent_dep_libs[i] &&
1409 *nldr_node_obj->pf_phase_split) {
1410 (nldr_node_obj->pers_libs)++;
1412 if (!persistent_dep_libs[i] ||
1413 !(*nldr_node_obj->pf_phase_split)) {
1423 /* Now we can load the root library */
1424 if (DSP_SUCCEEDED(status)) {
1425 new_attrs = nldr_obj->ldr_attrs;
1426 new_attrs.sym_arg = root;
1427 new_attrs.rmm_handle = nldr_node_obj;
1428 new_attrs.input_params = nldr_node_obj->priv_ref;
1429 new_attrs.base_image = false;
1432 nldr_obj->ldr_fxns.load_fxn(root->lib, flags, &new_attrs,
1437 * In case of failure, unload any dependent libraries that
1438 * were loaded, and close the root library.
1439 * (Persistent libraries are unloaded from the very top)
1441 if (DSP_FAILED(status)) {
1442 if (phase != NLDR_EXECUTE) {
1443 for (i = 0; i < nldr_node_obj->pers_libs; i++)
1444 unload_lib(nldr_node_obj,
1445 &nldr_node_obj->pers_lib_table[i]);
1447 nldr_node_obj->pers_libs = 0;
1449 for (i = 0; i < nd_libs_loaded; i++)
1450 unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);
1453 nldr_obj->ldr_fxns.close_fxn(root->lib);
1457 /* Going up one node in the dependency tree */
/* Scratch arrays are only needed while loading this level */
1460 kfree(dep_lib_uui_ds);
1461 dep_lib_uui_ds = NULL;
1463 kfree(persistent_dep_libs);
1464 persistent_dep_libs = NULL;
1470 * ======== load_ovly ========
/*
 * Load the overlay sections of the given phase for an overlay node:
 * look the node up by UUID in nldr_obj->ovly_table, reserve run-time
 * memory for each section of the phase (and for the shared 'other'
 * sections when their reference count is still zero), copy the
 * sections to the DSP through nldr_obj->ovly_fxn, and on success
 * update the phase reference count.  On failure the reserved
 * sections are released with free_sects().
 */
1472 static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
1473 enum nldr_phase phase)
1475 struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1476 struct ovly_node *po_node = NULL;
1477 struct ovly_sect *phase_sects = NULL;
1478 struct ovly_sect *other_sects_list = NULL;
1481 u16 other_alloc = 0;
1482 u16 *ref_count = NULL;
1483 u16 *other_ref = NULL;
1485 struct ovly_sect *ovly_section;
1488 /* Find the node in the table */
1489 for (i = 0; i < nldr_obj->ovly_nodes; i++) {
1491 (nldr_node_obj->uuid, nldr_obj->ovly_table[i].uuid)) {
1493 po_node = &(nldr_obj->ovly_table[i]);
1498 DBC_ASSERT(i < nldr_obj->ovly_nodes);
/* Select the section list and reference count for this phase;
 * the create phase also covers the shared 'other' sections */
1507 ref_count = &(po_node->create_ref);
1508 other_ref = &(po_node->other_ref);
1509 phase_sects = po_node->create_sects_list;
1510 other_sects_list = po_node->other_sects_list;
1514 ref_count = &(po_node->execute_ref);
1515 phase_sects = po_node->execute_sects_list;
1519 ref_count = &(po_node->delete_ref);
1520 phase_sects = po_node->delete_sects_list;
1528 if (ref_count == NULL)
/* Sections already resident for this phase: nothing to allocate */
1531 if (*ref_count != 0)
1534 /* 'Allocate' memory for overlay sections of this phase */
1535 ovly_section = phase_sects;
1536 while (ovly_section) {
1537 /* allocate *//* page not supported yet */
1538 /* reserve *//* align */
1539 status = rmm_alloc(nldr_obj->rmm, 0, ovly_section->size, 0,
1540 &(ovly_section->sect_run_addr), true);
1541 if (DSP_SUCCEEDED(status)) {
1542 ovly_section = ovly_section->next_sect;
1548 if (other_ref && *other_ref == 0) {
1549 /* 'Allocate' memory for other overlay sections
1551 if (DSP_SUCCEEDED(status)) {
1552 ovly_section = other_sects_list;
1553 while (ovly_section) {
1554 /* page not supported *//* align */
1557 rmm_alloc(nldr_obj->rmm, 0,
1558 ovly_section->size, 0,
1559 &(ovly_section->sect_run_addr),
1561 if (DSP_SUCCEEDED(status)) {
1562 ovly_section = ovly_section->next_sect;
/* First use of this phase: actually copy the sections down */
1570 if (*ref_count == 0) {
1571 if (DSP_SUCCEEDED(status)) {
1572 /* Load sections for this phase */
1573 ovly_section = phase_sects;
1574 while (ovly_section && DSP_SUCCEEDED(status)) {
1576 (*nldr_obj->ovly_fxn) (nldr_node_obj->
1583 ovly_section->page);
/* A short write means the section did not load fully */
1584 if (bytes != ovly_section->size)
1587 ovly_section = ovly_section->next_sect;
1591 if (other_ref && *other_ref == 0) {
1592 if (DSP_SUCCEEDED(status)) {
1593 /* Load other sections (create phase) */
1594 ovly_section = other_sects_list;
1595 while (ovly_section && DSP_SUCCEEDED(status)) {
1597 (*nldr_obj->ovly_fxn) (nldr_node_obj->
1604 ovly_section->page);
1605 if (bytes != ovly_section->size)
1608 ovly_section = ovly_section->next_sect;
1612 if (DSP_FAILED(status)) {
1613 /* 'Deallocate' memory */
1614 free_sects(nldr_obj, phase_sects, alloc_num);
1615 free_sects(nldr_obj, other_sects_list, other_alloc);
1618 if (DSP_SUCCEEDED(status) && (ref_count != NULL)) {
1629 * ======== remote_alloc ========
/*
 * DBLL allocation callback: carve out DSP memory for a code/data/BSS
 * section through RMM.  Byte sizes are rounded up to whole DSP words
 * and 'align' is widened to a multiple of the DSP cache line size.
 * Segment selection order: the explicitly requested segment
 * (segmnt_id != -1), otherwise the segment encoded in the node's
 * packed dynamic-load flags for the current phase; if that segment is
 * only preferred (not required) and allocation fails with -ENOMEM,
 * fall back to any segment whose type bits match.  The segment id
 * actually used is saved in rmm_addr_obj->segid for the later free.
 */
1631 static int remote_alloc(void **ref, u16 mem_sect, u32 size,
1632 u32 align, u32 *dsp_address,
1633 s32 segmnt_id, s32 req,
/* 'ref' is really the nldr_nodeobject registered with the loader */
1636 struct nldr_nodeobject *hnode = (struct nldr_nodeobject *)ref;
1637 struct nldr_object *nldr_obj;
1638 struct rmm_target_obj *rmm;
1639 u16 mem_phase_bit = MAXFLAGS;
1644 struct rmm_addr *rmm_addr_obj = (struct rmm_addr *)dsp_address;
1645 bool mem_load_req = false;
1646 int status = -ENOMEM; /* Set to fail */
1648 DBC_REQUIRE(mem_sect == DBLL_CODE || mem_sect == DBLL_DATA ||
1649 mem_sect == DBLL_BSS);
1650 nldr_obj = hnode->nldr_obj;
1651 rmm = nldr_obj->rmm;
1652 /* Convert size to DSP words */
1654 (size + nldr_obj->us_dsp_word_size -
1655 1) / nldr_obj->us_dsp_word_size;
1656 /* Modify memory 'align' to account for DSP cache line size */
1657 align = find_lcm(GEM_CACHE_LINE_SIZE, align);
1658 dev_dbg(bridge, "%s: memory align to 0x%x\n", __func__, align);
/* Caller pinned the segment explicitly: honour it as-is */
1659 if (segmnt_id != -1) {
1660 rmm_addr_obj->segid = segmnt_id;
/* Otherwise pick the flag bit for this phase (see the packed
 * code/data mask format documented at the top of the file) */
1664 switch (hnode->phase) {
1666 mem_phase_bit = CREATEDATAFLAGBIT;
1669 mem_phase_bit = DELETEDATAFLAGBIT;
1672 mem_phase_bit = EXECUTEDATAFLAGBIT;
1678 if (mem_sect == DBLL_CODE)
1681 if (mem_phase_bit < MAXFLAGS)
1682 segid = hnode->seg_id[mem_phase_bit];
1684 /* Determine if there is a memory loading requirement */
1685 if ((hnode->code_data_flag_mask >> mem_phase_bit) & 0x1)
1686 mem_load_req = true;
1689 mem_sect_type = (mem_sect == DBLL_CODE) ? DYNM_CODE : DYNM_DATA;
1691 /* Find an appropriate segment based on mem_sect */
1692 if (segid == NULLID) {
1693 /* No memory requirements of preferences */
1694 DBC_ASSERT(!mem_load_req);
1697 if (segid <= MAXSEGID) {
1698 DBC_ASSERT(segid < nldr_obj->dload_segs);
1699 /* Attempt to allocate from segid first. */
1700 rmm_addr_obj->segid = segid;
1702 rmm_alloc(rmm, segid, word_size, align, dsp_address, false);
1703 if (DSP_FAILED(status)) {
1704 dev_dbg(bridge, "%s: Unable allocate from segment %d\n",
1708 /* segid > MAXSEGID ==> Internal or external memory */
1709 DBC_ASSERT(segid == MEMINTERNALID || segid == MEMEXTERNALID);
1710 /* Check for any internal or external memory segment,
1711 * depending on segid. */
1712 mem_sect_type |= segid == MEMINTERNALID ?
1713 DYNM_INTERNAL : DYNM_EXTERNAL;
1714 for (i = 0; i < nldr_obj->dload_segs; i++) {
1715 if ((nldr_obj->seg_table[i] & mem_sect_type) !=
1719 status = rmm_alloc(rmm, i, word_size, align,
1720 dsp_address, false);
1721 if (DSP_SUCCEEDED(status)) {
1722 /* Save segid for freeing later */
1723 rmm_addr_obj->segid = i;
1729 /* Haven't found memory yet, attempt to find any segment that works */
1730 if (status == -ENOMEM && !mem_load_req) {
1731 dev_dbg(bridge, "%s: Preferred segment unavailable, trying "
1732 "another\n", __func__);
1733 for (i = 0; i < nldr_obj->dload_segs; i++) {
1734 /* All bits of mem_sect_type must be set */
1735 if ((nldr_obj->seg_table[i] & mem_sect_type) !=
1739 status = rmm_alloc(rmm, i, word_size, align,
1740 dsp_address, false);
1741 if (DSP_SUCCEEDED(status)) {
1743 rmm_addr_obj->segid = i;
1752 static int remote_free(void **ref, u16 space, u32 dsp_address,
1753 u32 size, bool reserve)
1755 struct nldr_object *nldr_obj = (struct nldr_object *)ref;
1756 struct rmm_target_obj *rmm;
1758 int status = -ENOMEM; /* Set to fail */
1760 DBC_REQUIRE(nldr_obj);
1762 rmm = nldr_obj->rmm;
1764 /* Convert size to DSP words */
1766 (size + nldr_obj->us_dsp_word_size -
1767 1) / nldr_obj->us_dsp_word_size;
1769 if (rmm_free(rmm, space, dsp_address, word_size, reserve))
1776 * ======== unload_lib ========
/*
 * Recursively unload a library node: first unload every entry in its
 * dependent-library tree, then unload and close the root library via
 * the loader function table, and finally free the dependency array.
 */
1778 static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
1779 struct lib_node *root)
1781 struct dbll_attrs new_attrs;
1782 struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1785 DBC_ASSERT(root != NULL);
1787 /* Unload dependent libraries */
1788 for (i = 0; i < root->dep_libs; i++)
1789 unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);
/* Hand the loader the same attributes used at load time so its
 * callbacks (RMM handle, private ref) match the original load */
1793 new_attrs = nldr_obj->ldr_attrs;
1794 new_attrs.rmm_handle = nldr_obj->rmm;
1795 new_attrs.input_params = nldr_node_obj->priv_ref;
1796 new_attrs.base_image = false;
1797 new_attrs.sym_arg = root;
1800 /* Unload the root library */
1801 nldr_obj->ldr_fxns.unload_fxn(root->lib, &new_attrs);
1802 nldr_obj->ldr_fxns.close_fxn(root->lib);
1805 /* Free dependent library list */
1806 kfree(root->dep_libs_tree);
1807 root->dep_libs_tree = NULL;
1811 * ======== unload_ovly ========
/*
 * Release the overlay sections of the given phase: find the node by
 * UUID in the overlay table, pick the section list and reference
 * count for the phase, drop the count, and once it reaches zero free
 * the phase's sections with free_sects().  The shared 'other'
 * sections are released in the delete phase, when their own count
 * (other_ref) reaches zero.
 */
1813 static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
1814 enum nldr_phase phase)
1816 struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1817 struct ovly_node *po_node = NULL;
1818 struct ovly_sect *phase_sects = NULL;
1819 struct ovly_sect *other_sects_list = NULL;
1822 u16 other_alloc = 0;
1823 u16 *ref_count = NULL;
1824 u16 *other_ref = NULL;
1826 /* Find the node in the table */
1827 for (i = 0; i < nldr_obj->ovly_nodes; i++) {
1829 (nldr_node_obj->uuid, nldr_obj->ovly_table[i].uuid)) {
1831 po_node = &(nldr_obj->ovly_table[i]);
1836 DBC_ASSERT(i < nldr_obj->ovly_nodes);
1839 /* TODO: Should we print warning here? */
/* Select the section list and reference count for this phase */
1844 ref_count = &(po_node->create_ref);
1845 phase_sects = po_node->create_sects_list;
1846 alloc_num = po_node->create_sects;
1849 ref_count = &(po_node->execute_ref);
1850 phase_sects = po_node->execute_sects_list;
1851 alloc_num = po_node->execute_sects;
1854 ref_count = &(po_node->delete_ref);
1855 other_ref = &(po_node->other_ref);
1856 phase_sects = po_node->delete_sects_list;
1857 /* 'Other' overlay sections are unloaded in the delete phase */
1858 other_sects_list = po_node->other_sects_list;
1859 alloc_num = po_node->delete_sects;
1860 other_alloc = po_node->other_sects;
/* An unload without a matching load would underflow the count */
1866 DBC_ASSERT(ref_count && (*ref_count > 0));
1867 if (ref_count && (*ref_count > 0)) {
1870 DBC_ASSERT(*other_ref > 0);
/* Last user of this phase: release its sections */
1875 if (ref_count && *ref_count == 0) {
1876 /* 'Deallocate' memory */
1877 free_sects(nldr_obj, phase_sects, alloc_num);
1879 if (other_ref && *other_ref == 0)
1880 free_sects(nldr_obj, other_sects_list, other_alloc);
1884 * ======== find_in_persistent_lib_array ========
1886 static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
1887 struct dbll_library_obj *lib)
1891 for (i = 0; i < nldr_node_obj->pers_libs; i++) {
1892 if (lib == nldr_node_obj->pers_lib_table[i].lib)
1901 * ================ Find LCM (Least Common Multiplier ===
1903 static u32 find_lcm(u32 a, u32 b)
1907 ret = a * b / find_gcf(a, b);
1913 * ================ Find GCF (Greatest Common Factor ) ===
1915 static u32 find_gcf(u32 a, u32 b)
1919 /* Get the GCF (Greatest common factor between the numbers,
1920 * using Euclidian Algo */
1921 while ((c = (a % b))) {
1928 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
1930 * nldr_find_addr() - Find the closest symbol to the given address based on
1931 * dynamic node object.
1933 * @nldr_node: Dynamic node object
1934 * @sym_addr: Given address to find the dsp symbol
1935 * @offset_range: offset range to look for dsp symbol
1936 * @offset_output: Symbol Output address
1937 * @sym_name: String with the dsp symbol
1939 * This function finds the node library for a given address and
1940 * retrieves the dsp symbol by calling dbll_find_dsp_symbol.
/*
 * Search order: the node's root library for the current phase, then
 * its dependent-library tree, then the persistent library table.
 */
1942 int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
1943 u32 offset_range, void *offset_output, char *sym_name)
1946 bool status1 = false;
1948 struct lib_node root = { NULL, 0, NULL };
1949 DBC_REQUIRE(refs > 0);
1950 DBC_REQUIRE(offset_output != NULL);
1951 DBC_REQUIRE(sym_name != NULL);
1952 pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__, (u32) nldr_node,
1953 sym_addr, offset_range, (u32) offset_output, sym_name);
/* Split dynamic nodes keep one library per phase */
1955 if (nldr_node->dynamic && *nldr_node->pf_phase_split) {
1956 switch (nldr_node->phase) {
1958 root = nldr_node->create_lib;
1961 root = nldr_node->execute_lib;
1964 root = nldr_node->delete_lib;
1971 /* for Overlay nodes or non-split Dynamic nodes */
1972 root = nldr_node->root;
/* Try the root library first */
1975 status1 = dbll_find_dsp_symbol(root.lib, sym_addr,
1976 offset_range, offset_output, sym_name);
1978 /* If symbol not found, check dependent libraries */
1980 for (i = 0; i < root.dep_libs; i++) {
1981 status1 = dbll_find_dsp_symbol(
1982 root.dep_libs_tree[i].lib, sym_addr,
1983 offset_range, offset_output, sym_name);
1988 /* Check persistent libraries */
1990 for (i = 0; i < nldr_node->pers_libs; i++) {
1991 status1 = dbll_find_dsp_symbol(
1992 nldr_node->pers_lib_table[i].lib, sym_addr,
1993 offset_range, offset_output, sym_name);
/* Nothing matched within the requested range */
2000 pr_debug("%s: Address 0x%x not found in range %d.\n",
2001 __func__, sym_addr, offset_range);