/*
 * drv.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * DSP/BIOS Bridge resource allocation module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>

/*  ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/*  ----------------------------------- OS Adaptation Layer */
#include <dspbridge/list.h>

/*  ----------------------------------- This */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>

#include <dspbridge/node.h>
#include <dspbridge/proc.h>
#include <dspbridge/strm.h>
#include <dspbridge/nodepriv.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/resourcecleanup.h>
/*  ----------------------------------- Defines, Data Structures, Typedefs */
struct drv_object {
        struct lst_list *dev_list;
        struct lst_list *dev_node_string;
};

/*
 *  This is the Device Extension. Named with the DRV_ prefix
 *  since it lives in this module.
 */
struct drv_ext {
        struct list_head link;
        char sz_string[MAXREGPATHLENGTH];
};

/*  ----------------------------------- Globals */
static s32 refs;
static bool ext_phys_mem_pool_enabled;
struct ext_phys_mem_pool {
        u32 phys_mem_base;
        u32 phys_mem_size;
        u32 virt_mem_base;
        u32 next_phys_alloc_ptr;
};
static struct ext_phys_mem_pool ext_mem_pool;
68
69 /*  ----------------------------------- Function Prototypes */
70 static int request_bridge_resources(struct cfg_hostres *res);
71
72
73 /* GPP PROCESS CLEANUP CODE */
74
75 static int drv_proc_free_node_res(int id, void *p, void *data);
76
77 /* Allocate and add a node resource element
78 * This function is called from .Node_Allocate. */
int drv_insert_node_res_element(void *hnode, void *node_resource,
                                       void *process_ctxt)
{
        struct node_res_object **node_res_obj =
            (struct node_res_object **)node_resource;
        struct process_context *ctxt = (struct process_context *)process_ctxt;
        int status = 0;
        int retval;

        *node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
        if (!*node_res_obj) {
                status = -ENOMEM;
                goto func_end;
        }

        (*node_res_obj)->hnode = hnode;
        retval = idr_get_new(ctxt->node_id, *node_res_obj,
                                                &(*node_res_obj)->id);
        if (retval == -EAGAIN) {
                if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) {
                        pr_err("%s: OUT OF MEMORY\n", __func__);
                        status = -ENOMEM;
                        goto func_end;
                }

                retval = idr_get_new(ctxt->node_id, *node_res_obj,
                                                &(*node_res_obj)->id);
        }
        if (retval) {
                pr_err("%s: FAILED, IDR is FULL\n", __func__);
                status = -EFAULT;
        }
func_end:
        if (status)
                kfree(*node_res_obj);

        return status;
}
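
/*
 * Note on the IDR pattern above: with this kernel's IDR API, idr_get_new()
 * can return -EAGAIN when the IDR needs more memory; the caller then
 * preloads with idr_pre_get() and retries. A minimal sketch of the same
 * protocol (hypothetical caller, for illustration only):
 *
 *        do {
 *                if (!idr_pre_get(my_idr, GFP_KERNEL))
 *                        return -ENOMEM;
 *                err = idr_get_new(my_idr, my_obj, &id);
 *        } while (err == -EAGAIN);
 */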

/* Release all node resources and their context.
 * Actual node deallocation. */
static int drv_proc_free_node_res(int id, void *p, void *data)
{
        struct process_context *ctxt = data;
        int status;
        struct node_res_object *node_res_obj = p;
        u32 node_state;

        if (node_res_obj->node_allocated) {
                node_state = node_get_state(node_res_obj->hnode);
                if (node_state <= NODE_DELETING) {
                        if ((node_state == NODE_RUNNING) ||
                            (node_state == NODE_PAUSED) ||
                            (node_state == NODE_TERMINATING))
                                node_terminate
                                    (node_res_obj->hnode, &status);

                        node_delete(node_res_obj, ctxt);
                }
        }

        return 0;
}
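
/*
 * The check above relies on the node state enumeration being ordered: any
 * state up to and including NODE_DELETING is assumed to still own DSP-side
 * resources, and only nodes that are running, paused, or terminating need
 * an explicit node_terminate() before node_delete().
 */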

/* Release all mapped and reserved DMM resources */
int drv_remove_all_dmm_res_elements(void *process_ctxt)
{
        struct process_context *ctxt = (struct process_context *)process_ctxt;
        int status = 0;
        struct dmm_map_object *temp_map, *map_obj;

        /* Free DMM mapped memory resources */
        list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
                status = proc_un_map(ctxt->hprocessor,
                                     (void *)map_obj->dsp_addr, ctxt);
                if (status)
                        pr_err("%s: proc_un_map failed! status = 0x%x\n",
                               __func__, status);
        }
        return status;
}
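
/*
 * list_for_each_entry_safe() is used above because proc_un_map() is
 * expected to drop the map_obj from ctxt->dmm_map_list as a side effect;
 * the _safe variant caches the next element so the walk survives the
 * deletion.
 */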

/* Update node allocation status */
void drv_proc_node_update_status(void *node_resource, s32 status)
{
        struct node_res_object *node_res_obj =
            (struct node_res_object *)node_resource;
        DBC_ASSERT(node_resource != NULL);
        node_res_obj->node_allocated = status;
}

/* Update node heap status */
void drv_proc_node_update_heap_status(void *node_resource, s32 status)
{
        struct node_res_object *node_res_obj =
            (struct node_res_object *)node_resource;
        DBC_ASSERT(node_resource != NULL);
        node_res_obj->heap_allocated = status;
}

/* Release all node resources and their context.
 * This is called from bridge_release().
 */
int drv_remove_all_node_res_elements(void *process_ctxt)
{
        struct process_context *ctxt = process_ctxt;

        idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt);
        idr_destroy(ctxt->node_id);

        return 0;
}

/* Allocate the STRM resource element.
 * This is called after the actual resource is allocated.
 */
int drv_proc_insert_strm_res_element(void *stream_obj,
                                            void *strm_res, void *process_ctxt)
{
        struct strm_res_object **pstrm_res =
            (struct strm_res_object **)strm_res;
        struct process_context *ctxt = (struct process_context *)process_ctxt;
        int status = 0;
        int retval;

        *pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
        if (*pstrm_res == NULL) {
                /* An allocation failure is -ENOMEM, not -EFAULT */
                status = -ENOMEM;
                goto func_end;
        }

        (*pstrm_res)->hstream = stream_obj;
        retval = idr_get_new(ctxt->stream_id, *pstrm_res,
                                                &(*pstrm_res)->id);
        if (retval == -EAGAIN) {
                if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) {
                        pr_err("%s: OUT OF MEMORY\n", __func__);
                        status = -ENOMEM;
                        goto func_end;
                }

                retval = idr_get_new(ctxt->stream_id, *pstrm_res,
                                                &(*pstrm_res)->id);
        }
        if (retval) {
                pr_err("%s: FAILED, IDR is FULL\n", __func__);
                status = -EPERM;
        }

func_end:
        return status;
}
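
/*
 * Stream resources are tracked in the per-process ctxt->stream_id IDR,
 * mirroring the node bookkeeping above, so that bridge_release() can tear
 * down every stream a dying process left behind; see
 * drv_remove_all_strm_res_elements() below.
 */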

static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
{
        struct process_context *ctxt = process_ctxt;
        struct strm_res_object *strm_res = p;
        struct stream_info strm_info;
        struct dsp_streaminfo user;
        u8 **ap_buffer = NULL;
        u8 *buf_ptr;
        u32 ul_bytes;
        u32 dw_arg;
        s32 ul_buf_size;

        if (strm_res->num_bufs) {
                ap_buffer = kmalloc((strm_res->num_bufs *
                                       sizeof(u8 *)), GFP_KERNEL);
                if (ap_buffer) {
                        strm_free_buffer(strm_res,
                                                  ap_buffer,
                                                  strm_res->num_bufs,
                                                  ctxt);
                        kfree(ap_buffer);
                }
        }
        strm_info.user_strm = &user;
        user.number_bufs_in_stream = 0;
        strm_get_info(strm_res->hstream, &strm_info, sizeof(strm_info));
        while (user.number_bufs_in_stream--)
                strm_reclaim(strm_res->hstream, &buf_ptr, &ul_bytes,
                             (u32 *) &ul_buf_size, &dw_arg);
        strm_close(strm_res, ctxt);
        return 0;
}
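
/*
 * Teardown order above: first free any buffers the process allocated on
 * the stream, then reclaim whatever buffers are still queued inside it
 * (strm_get_info() reports the count), and only then close the stream;
 * closing with buffers outstanding could otherwise leak them.
 */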

/* Release all stream resources and their context.
 * This is called from bridge_release().
 */
int drv_remove_all_strm_res_elements(void *process_ctxt)
{
        struct process_context *ctxt = process_ctxt;

        idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt);
        idr_destroy(ctxt->stream_id);

        return 0;
}

/* Update the stream resource element */
int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources)
{
        int status = 0;
        struct strm_res_object **strm_res =
            (struct strm_res_object **)strm_resources;

        (*strm_res)->num_bufs = num_bufs;
        return status;
}

/* GPP PROCESS CLEANUP CODE END */

/*
 *  ======== drv_create ========
 *  Purpose:
 *      DRV Object gets created only once during Driver Loading.
 */
int drv_create(struct drv_object **drv_obj)
{
        int status = 0;
        struct drv_object *pdrv_object = NULL;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        DBC_REQUIRE(drv_obj != NULL);
        DBC_REQUIRE(refs > 0);

        pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
        if (pdrv_object) {
                /* Create and Initialize List of device objects */
                pdrv_object->dev_list = kzalloc(sizeof(struct lst_list),
                                                        GFP_KERNEL);
                if (pdrv_object->dev_list) {
                        /* Create and Initialize List of device Extension */
                        pdrv_object->dev_node_string =
                                kzalloc(sizeof(struct lst_list), GFP_KERNEL);
                        if (!(pdrv_object->dev_node_string)) {
                                status = -EPERM;
                        } else {
                                INIT_LIST_HEAD(&pdrv_object->
                                               dev_node_string->head);
                                INIT_LIST_HEAD(&pdrv_object->dev_list->head);
                        }
                } else {
                        status = -ENOMEM;
                }
        } else {
                status = -ENOMEM;
        }
        /* Store the DRV Object in the driver data */
        if (!status) {
                if (drv_datap) {
                        drv_datap->drv_object = (void *)pdrv_object;
                } else {
                        status = -EPERM;
                        pr_err("%s: Failed to store DRV object\n", __func__);
                }
        }

        if (!status) {
                *drv_obj = pdrv_object;
        } else if (pdrv_object) {
                /* Free the lists and the DRV object itself; guard against
                 * the case where the object allocation itself failed. */
                kfree(pdrv_object->dev_list);
                kfree(pdrv_object->dev_node_string);
                kfree(pdrv_object);
        }

        DBC_ENSURE(status || pdrv_object);
        return status;
}
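
/*
 * drv_create() is expected to run once, at driver load: the resulting DRV
 * object is stashed in the bridge driver data (drv_datap->drv_object) and
 * every later lookup (drv_get_first_dev_object() and friends) fetches it
 * back through dev_get_drvdata(bridge).
 */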

/*
 *  ======== drv_exit ========
 *  Purpose:
 *      Discontinue usage of the DRV module.
 */
void drv_exit(void)
{
        DBC_REQUIRE(refs > 0);

        refs--;

        DBC_ENSURE(refs >= 0);
}

/*
 *  ======== drv_destroy ========
 *  Purpose:
 *      Invoked during bridge de-initialization.
 */
int drv_destroy(struct drv_object *driver_obj)
{
        int status = 0;
        struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(pdrv_object);

        /*
         *  Delete the lists if they exist. We should not get here with
         *  non-empty lists, as drv_remove_dev_object and the last
         *  drv_release_resources remove each list once it is empty.
         */
        kfree(pdrv_object->dev_list);
        kfree(pdrv_object->dev_node_string);
        kfree(pdrv_object);
        /* Clear the DRV Object in the driver data */
        if (drv_datap) {
                drv_datap->drv_object = NULL;
        } else {
                status = -EPERM;
                pr_err("%s: Failed to clear DRV object\n", __func__);
        }

        return status;
}

/*
 *  ======== drv_get_dev_object ========
 *  Purpose:
 *      Given an index, returns a handle to DevObject from the list.
 */
int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
                              struct dev_object **device_obj)
{
        int status = 0;
#ifdef CONFIG_TIDSPBRIDGE_DEBUG
        /* used only for Assertions and debug messages */
        struct drv_object *pdrv_obj = (struct drv_object *)hdrv_obj;
#endif
        struct dev_object *dev_obj;
        u32 i;
        DBC_REQUIRE(pdrv_obj);
        DBC_REQUIRE(device_obj != NULL);
        DBC_REQUIRE(index >= 0);
        DBC_REQUIRE(refs > 0);
        DBC_ASSERT(!(LST_IS_EMPTY(pdrv_obj->dev_list)));

        dev_obj = (struct dev_object *)drv_get_first_dev_object();
        for (i = 0; i < index; i++) {
                dev_obj =
                    (struct dev_object *)drv_get_next_dev_object((u32) dev_obj);
        }
        if (dev_obj) {
                *device_obj = (struct dev_object *)dev_obj;
        } else {
                *device_obj = NULL;
                status = -EPERM;
        }

        return status;
}

/*
 *  ======== drv_get_first_dev_object ========
 *  Purpose:
 *      Retrieve the first Device Object handle from an internal linked list
 *      of DEV_OBJECTs maintained by DRV.
 */
u32 drv_get_first_dev_object(void)
{
        u32 dw_dev_object = 0;
        struct drv_object *pdrv_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        if (drv_datap && drv_datap->drv_object) {
                pdrv_obj = drv_datap->drv_object;
                if ((pdrv_obj->dev_list != NULL) &&
                    !LST_IS_EMPTY(pdrv_obj->dev_list))
                        dw_dev_object = (u32) lst_first(pdrv_obj->dev_list);
        } else {
                pr_err("%s: Failed to retrieve the object handle\n", __func__);
        }

        return dw_dev_object;
}
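
/*
 * A minimal sketch of how a caller walks the device list with
 * drv_get_first_dev_object() and drv_get_next_dev_object() (hypothetical
 * caller, for illustration only); each returned value is really a
 * struct dev_object pointer cast to u32, and 0 marks the end of the list:
 *
 *        u32 obj;
 *
 *        for (obj = drv_get_first_dev_object(); obj != 0;
 *             obj = drv_get_next_dev_object(obj))
 *                process_one((struct dev_object *)obj);
 */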

/*
 *  ======== drv_get_first_dev_extension ========
 *  Purpose:
 *      Retrieve the first Device Extension from an internal linked list
 *      of pointers to dev_node strings maintained by DRV.
 */
u32 drv_get_first_dev_extension(void)
{
        u32 dw_dev_extension = 0;
        struct drv_object *pdrv_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        if (drv_datap && drv_datap->drv_object) {
                pdrv_obj = drv_datap->drv_object;
                if ((pdrv_obj->dev_node_string != NULL) &&
                    !LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
                        dw_dev_extension =
                            (u32) lst_first(pdrv_obj->dev_node_string);
                }
        } else {
                pr_err("%s: Failed to retrieve the object handle\n", __func__);
        }

        return dw_dev_extension;
}

/*
 *  ======== drv_get_next_dev_object ========
 *  Purpose:
 *      Retrieve the next Device Object handle from an internal linked list
 *      of DEV_OBJECTs maintained by DRV, after having previously called
 *      drv_get_first_dev_object() and zero or more
 *      drv_get_next_dev_object() calls.
 */
u32 drv_get_next_dev_object(u32 hdev_obj)
{
        u32 dw_next_dev_object = 0;
        struct drv_object *pdrv_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        DBC_REQUIRE(hdev_obj != 0);

        if (drv_datap && drv_datap->drv_object) {
                pdrv_obj = drv_datap->drv_object;
                if ((pdrv_obj->dev_list != NULL) &&
                    !LST_IS_EMPTY(pdrv_obj->dev_list)) {
                        dw_next_dev_object = (u32) lst_next(pdrv_obj->dev_list,
                                                            (struct list_head *)
                                                            hdev_obj);
                }
        } else {
                pr_err("%s: Failed to retrieve the object handle\n", __func__);
        }

        return dw_next_dev_object;
}

/*
 *  ======== drv_get_next_dev_extension ========
 *  Purpose:
 *      Retrieve the next Device Extension from an internal linked list
 *      of pointers to DevNodeString maintained by DRV, after having
 *      previously called drv_get_first_dev_extension() and zero or more
 *      drv_get_next_dev_extension() calls.
 */
u32 drv_get_next_dev_extension(u32 dev_extension)
{
        u32 dw_dev_extension = 0;
        struct drv_object *pdrv_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        DBC_REQUIRE(dev_extension != 0);

        if (drv_datap && drv_datap->drv_object) {
                pdrv_obj = drv_datap->drv_object;
                if ((pdrv_obj->dev_node_string != NULL) &&
                    !LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
                        dw_dev_extension =
                            (u32) lst_next(pdrv_obj->dev_node_string,
                                           (struct list_head *)dev_extension);
                }
        } else {
                pr_err("%s: Failed to retrieve the object handle\n", __func__);
        }

        return dw_dev_extension;
}

/*
 *  ======== drv_init ========
 *  Purpose:
 *      Initialize DRV module private state.
 */
int drv_init(void)
{
        s32 ret = 1;            /* function return value */

        DBC_REQUIRE(refs >= 0);

        if (ret)
                refs++;

        DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

        return ret;
}
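
/*
 * drv_init()/drv_exit() implement the simple reference counting used by
 * the bridge modules in this driver: each client calls drv_init() once,
 * and the module's private state stays valid until the matching
 * drv_exit() drops refs back toward zero.
 */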

/*
 *  ======== drv_insert_dev_object ========
 *  Purpose:
 *      Insert a DevObject into the device list of the DRV object.
 */
int drv_insert_dev_object(struct drv_object *driver_obj,
                                 struct dev_object *hdev_obj)
{
        struct drv_object *pdrv_object = (struct drv_object *)driver_obj;

        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(hdev_obj != NULL);
        DBC_REQUIRE(pdrv_object);
        DBC_ASSERT(pdrv_object->dev_list);

        lst_put_tail(pdrv_object->dev_list, (struct list_head *)hdev_obj);

        DBC_ENSURE(!LST_IS_EMPTY(pdrv_object->dev_list));

        return 0;
}

/*
 *  ======== drv_remove_dev_object ========
 *  Purpose:
 *      Search for and remove a DeviceObject from the given list of DRV
 *      objects.
 */
int drv_remove_dev_object(struct drv_object *driver_obj,
                                 struct dev_object *hdev_obj)
{
        int status = -EPERM;
        struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
        struct list_head *cur_elem;

        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(pdrv_object);
        DBC_REQUIRE(hdev_obj != NULL);

        DBC_REQUIRE(pdrv_object->dev_list != NULL);
        DBC_REQUIRE(!LST_IS_EMPTY(pdrv_object->dev_list));

        /* Search the list for hdev_obj: */
        for (cur_elem = lst_first(pdrv_object->dev_list); cur_elem != NULL;
             cur_elem = lst_next(pdrv_object->dev_list, cur_elem)) {
                /* If found, remove it. */
                if ((struct dev_object *)cur_elem == hdev_obj) {
                        lst_remove_elem(pdrv_object->dev_list, cur_elem);
                        status = 0;
                        break;
                }
        }
        /* Remove list if empty. */
        if (LST_IS_EMPTY(pdrv_object->dev_list)) {
                kfree(pdrv_object->dev_list);
                pdrv_object->dev_list = NULL;
        }
        DBC_ENSURE((pdrv_object->dev_list == NULL) ||
                   !LST_IS_EMPTY(pdrv_object->dev_list));

        return status;
}

/*
 *  ======== drv_request_resources ========
 *  Purpose:
 *      Requests resources from the OS.
 */
int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
{
        int status = 0;
        struct drv_object *pdrv_object;
        struct drv_ext *pszdev_node;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        DBC_REQUIRE(dw_context != 0);
        DBC_REQUIRE(dev_node_strg != NULL);

        /*
         *  Allocate memory to hold the string. This will live until
         *  it is freed in drv_release_resources(). Update the driver
         *  object list.
         */

        if (!drv_datap || !drv_datap->drv_object)
                status = -ENODATA;
        else
                pdrv_object = drv_datap->drv_object;

        if (!status) {
                pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
                if (pszdev_node) {
                        lst_init_elem(&pszdev_node->link);
                        strncpy(pszdev_node->sz_string,
                                (char *)dw_context, MAXREGPATHLENGTH - 1);
                        pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
                        /* Update the Driver Object List */
                        *dev_node_strg = (u32) pszdev_node->sz_string;
                        lst_put_tail(pdrv_object->dev_node_string,
                                     (struct list_head *)pszdev_node);
                } else {
                        status = -ENOMEM;
                        *dev_node_strg = 0;
                }
        } else {
                dev_dbg(bridge, "%s: Failed to get Driver Object from Registry",
                        __func__);
                *dev_node_strg = 0;
        }

        DBC_ENSURE((!status && dev_node_strg != NULL &&
                    !LST_IS_EMPTY(pdrv_object->dev_node_string)) ||
                   (status && *dev_node_strg == 0));

        return status;
}

/*
 *  ======== drv_release_resources ========
 *  Purpose:
 *      Releases resources from the OS.
 */
int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
{
        int status = 0;
        struct drv_object *pdrv_object = (struct drv_object *)hdrv_obj;
        struct drv_ext *pszdev_node;

        /*
         *  Irrespective of the status, go ahead and clean up.
         *  The loop below may overwrite the status.
         */
        for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
             pszdev_node != NULL; pszdev_node = (struct drv_ext *)
             drv_get_next_dev_extension((u32) pszdev_node)) {
                if (!pdrv_object->dev_node_string) {
                        /* When could this happen? */
                        continue;
                }
                if ((u32) pszdev_node == dw_context) {
                        /* Found it: delete it from the driver object list */
                        lst_remove_elem(pdrv_object->dev_node_string,
                                        (struct list_head *)pszdev_node);
                        kfree((void *)pszdev_node);
                        break;
                }
                /* Delete the list if it is empty */
                if (LST_IS_EMPTY(pdrv_object->dev_node_string)) {
                        kfree(pdrv_object->dev_node_string);
                        pdrv_object->dev_node_string = NULL;
                }
        }
        return status;
}

/*
 *  ======== request_bridge_resources ========
 *  Purpose:
 *      Reserves shared memory for bridge.
 */
static int request_bridge_resources(struct cfg_hostres *res)
{
        struct cfg_hostres *host_res = res;

        /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
        host_res->num_mem_windows = 2;

        /* First window is for DSP internal memory */
        host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE);
        dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
        dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);

        /* On 24xx the base port does not map the memory for DSP
         * internal memory. TODO: do an ioremap here. */
        /* Second window is for DSP external memory shared with MPU */

        /* These are hard-coded values */
        host_res->birq_registers = 0;
        host_res->birq_attrib = 0;
        host_res->dw_offset_for_monitor = 0;
        host_res->dw_chnl_offset = 0;
        /* CHNL_MAXCHANNELS */
        host_res->dw_num_chnls = CHNL_MAXCHANNELS;
        host_res->dw_chnl_buf_size = 0x400;

        return 0;
}
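
/*
 * The channel configuration above is fixed policy rather than discovered
 * hardware state: CHNL_MAXCHANNELS channels of 0x400 bytes each, with the
 * IRQ and monitor fields zeroed because they appear to be unused here.
 */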

/*
 *  ======== drv_request_bridge_res_dsp ========
 *  Purpose:
 *      Reserves shared memory for bridge.
 */
int drv_request_bridge_res_dsp(void **phost_resources)
{
        int status = 0;
        struct cfg_hostres *host_res;
        u32 dw_buff_size;
        u32 dma_addr;
        u32 shm_size;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        dw_buff_size = sizeof(struct cfg_hostres);

        host_res = kzalloc(dw_buff_size, GFP_KERNEL);

        if (host_res != NULL) {
                request_bridge_resources(host_res);
                /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
                host_res->num_mem_windows = 4;

                host_res->dw_mem_base[0] = 0;
                host_res->dw_mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
                                                         OMAP_DSP_MEM1_SIZE);
                host_res->dw_mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
                                                         OMAP_DSP_MEM2_SIZE);
                host_res->dw_mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
                                                         OMAP_DSP_MEM3_SIZE);
                host_res->dw_per_base = ioremap(OMAP_PER_CM_BASE,
                                                OMAP_PER_CM_SIZE);
                host_res->dw_per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
                                                         OMAP_PER_PRM_SIZE);
                host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
                                                          OMAP_CORE_PRM_SIZE);

                dev_dbg(bridge, "dw_mem_base[0] 0x%x\n",
                        host_res->dw_mem_base[0]);
                dev_dbg(bridge, "dw_mem_base[1] 0x%x\n",
                        host_res->dw_mem_base[1]);
                dev_dbg(bridge, "dw_mem_base[2] 0x%x\n",
                        host_res->dw_mem_base[2]);
                dev_dbg(bridge, "dw_mem_base[3] 0x%x\n",
                        host_res->dw_mem_base[3]);
                dev_dbg(bridge, "dw_mem_base[4] 0x%x\n",
                        host_res->dw_mem_base[4]);

                shm_size = drv_datap->shm_size;
                if (shm_size >= 0x10000) {
                        /* Allocate physically contiguous,
                         * non-cacheable memory */
                        host_res->dw_mem_base[1] =
                            (u32) mem_alloc_phys_mem(shm_size, 0x100000,
                                                     &dma_addr);
                        if (host_res->dw_mem_base[1] == 0) {
                                status = -ENOMEM;
                                pr_err("shm reservation failed\n");
                        } else {
                                host_res->dw_mem_length[1] = shm_size;
                                host_res->dw_mem_phys[1] = dma_addr;

                                dev_dbg(bridge, "%s: Bridge shm address 0x%x "
                                        "dma_addr %x size %x\n", __func__,
                                        host_res->dw_mem_base[1],
                                        dma_addr, shm_size);
                        }
                }
                if (!status) {
                        /* These are hard-coded values */
                        host_res->birq_registers = 0;
                        host_res->birq_attrib = 0;
                        host_res->dw_offset_for_monitor = 0;
                        host_res->dw_chnl_offset = 0;
                        /* CHNL_MAXCHANNELS */
                        host_res->dw_num_chnls = CHNL_MAXCHANNELS;
                        host_res->dw_chnl_buf_size = 0x400;
                        dw_buff_size = sizeof(struct cfg_hostres);
                }
                *phost_resources = host_res;
        }
        /* End Mem alloc */
        return status;
}

void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size)
{
        u32 pool_virt_base;

        /* get the virtual address for the physical memory pool passed */
        pool_virt_base = (u32) ioremap(pool_phys_base, pool_size);

        if (!pool_virt_base) {
                pr_err("%s: external physical memory map failed\n", __func__);
                ext_phys_mem_pool_enabled = false;
        } else {
                ext_mem_pool.phys_mem_base = pool_phys_base;
                ext_mem_pool.phys_mem_size = pool_size;
                ext_mem_pool.virt_mem_base = pool_virt_base;
                ext_mem_pool.next_phys_alloc_ptr = pool_phys_base;
                ext_phys_mem_pool_enabled = true;
        }
}
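
/*
 * ioremap() above yields an uncached mapping of the carveout, which is
 * what the DSP shared-memory pool needs. The pool is then handed out
 * linearly by mem_ext_phys_mem_alloc() below, with next_phys_alloc_ptr
 * acting as a simple bump allocator that is never rewound until the pool
 * is released.
 */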

void mem_ext_phys_pool_release(void)
{
        if (ext_phys_mem_pool_enabled) {
                iounmap((void *)(ext_mem_pool.virt_mem_base));
                ext_phys_mem_pool_enabled = false;
        }
}

/*
 *  ======== mem_ext_phys_mem_alloc ========
 *  Purpose:
 *     Allocate physically contiguous, uncached memory from the external
 *     memory pool.
 */

static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 *phys_addr)
{
        u32 new_alloc_ptr;
        u32 offset;
        u32 virt_addr;

        if (align == 0)
                align = 1;

        if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
                     - ext_mem_pool.next_phys_alloc_ptr)) {
                /* Report failure through the out parameter, not by
                 * reassigning the local pointer. */
                *phys_addr = 0;
                return NULL;
        } else {
                offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
                if (offset == 0)
                        new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
                else
                        new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
                            (align - offset);
                if ((new_alloc_ptr + bytes) <=
                    (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
                        /* we can allocate */
                        *phys_addr = new_alloc_ptr;
                        ext_mem_pool.next_phys_alloc_ptr =
                            new_alloc_ptr + bytes;
                        virt_addr =
                            ext_mem_pool.virt_mem_base + (new_alloc_ptr -
                                                          ext_mem_pool.
                                                          phys_mem_base);
                        return (void *)virt_addr;
                } else {
                        *phys_addr = 0;
                        return NULL;
                }
        }
}
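
/*
 * The alignment step above assumes align is a power of two, so the
 * misalignment of the bump pointer can be computed with a mask. A worked
 * example, assuming next_phys_alloc_ptr = 0x87001234 and align = 0x1000:
 *
 *        offset        = 0x87001234 & 0x0fff = 0x234
 *        new_alloc_ptr = 0x87001234 + (0x1000 - 0x234) = 0x87002000
 */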

/*
 *  ======== mem_alloc_phys_mem ========
 *  Purpose:
 *      Allocate physically contiguous, uncached memory
 */
void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
                                u32 *physical_address)
{
        void *va_mem = NULL;
        dma_addr_t pa_mem;

        if (byte_size > 0) {
                if (ext_phys_mem_pool_enabled) {
                        va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask,
                                                        (u32 *) &pa_mem);
                } else {
                        va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
                                                                GFP_KERNEL);
                }
                if (va_mem == NULL)
                        *physical_address = 0;
                else
                        *physical_address = pa_mem;
        }
        return va_mem;
}
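
/*
 * When no external pool is configured, the allocation falls through to
 * dma_alloc_coherent(), so callers get the same contract either way: a
 * kernel virtual address plus the physical address written through
 * *physical_address, both suitable for sharing with the DSP.
 */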

/*
 *  ======== mem_free_phys_mem ========
 *  Purpose:
 *      Free the given block of physically contiguous memory.
 */
void mem_free_phys_mem(void *virtual_address, u32 physical_address,
                       u32 byte_size)
{
        DBC_REQUIRE(virtual_address != NULL);

        if (!ext_phys_mem_pool_enabled)
                dma_free_coherent(NULL, byte_size, virtual_address,
                                  physical_address);
}