4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * DSP/BIOS Bridge resource allocation module.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
18 #include <linux/types.h>
20 /* ----------------------------------- Host OS */
21 #include <dspbridge/host_os.h>
23 /* ----------------------------------- DSP/BIOS Bridge */
24 #include <dspbridge/dbdefs.h>
26 /* ----------------------------------- Trace & Debug */
27 #include <dspbridge/dbc.h>
29 /* ----------------------------------- OS Adaptation Layer */
30 #include <dspbridge/cfg.h>
31 #include <dspbridge/list.h>
33 /* ----------------------------------- This */
34 #include <dspbridge/drv.h>
35 #include <dspbridge/dev.h>
37 #include <dspbridge/node.h>
38 #include <dspbridge/proc.h>
39 #include <dspbridge/strm.h>
40 #include <dspbridge/nodepriv.h>
41 #include <dspbridge/dspchnl.h>
42 #include <dspbridge/resourcecleanup.h>
44 /* ----------------------------------- Defines, Data Structures, Typedefs */
/* NOTE(review): these file-scope lst_list pointers shadow the identically
 * named members accessed through pdrv_object below -- confirm whether the
 * globals are still used anywhere or are dead. */
46 struct lst_list *dev_list;
47 struct lst_list *dev_node_string;
51 * This is the Device Extension. Named with the Prefix
52 * DRV_ since it is living in this module
55 struct list_head link;
56 char sz_string[MAXREGPATHLENGTH];
59 /* ----------------------------------- Globals */
/* Enables the external physical memory pool allocator; toggled by
 * mem_ext_phys_pool_init()/mem_ext_phys_pool_release() below. */
61 static bool ext_phys_mem_pool_enabled;
62 struct ext_phys_mem_pool {
66 u32 next_phys_alloc_ptr;
68 static struct ext_phys_mem_pool ext_mem_pool;
70 /* ----------------------------------- Function Prototypes */
71 static int request_bridge_resources(struct cfg_hostres *res);
74 /* GPP PROCESS CLEANUP CODE */
76 static int drv_proc_free_node_res(int id, void *p, void *data);
/*
 * drv_insert_node_res_element() - allocate a node_res_object, attach the
 * node handle and register it in the process context's node idr, retrying
 * the idr_get_new() once after idr_pre_get() on -EAGAIN.  On failure the
 * freshly allocated object is freed (kfree below).
 */
78 /* Allocate and add a node resource element
79 * This function is called from .Node_Allocate. */
80 int drv_insert_node_res_element(void *hnode, void *node_resource,
83 struct node_res_object **node_res_obj =
84 (struct node_res_object **)node_resource;
85 struct process_context *ctxt = (struct process_context *)process_ctxt;
89 *node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
95 (*node_res_obj)->hnode = hnode;
96 retval = idr_get_new(ctxt->node_id, *node_res_obj,
97 &(*node_res_obj)->id);
98 if (retval == -EAGAIN) {
99 if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) {
100 pr_err("%s: OUT OF MEMORY\n", __func__);
105 retval = idr_get_new(ctxt->node_id, *node_res_obj,
106 &(*node_res_obj)->id);
109 pr_err("%s: FAILED, IDR is FULL\n", __func__);
/* Error path: release the node_res_object that was never registered. */
114 kfree(*node_res_obj);
/*
 * drv_proc_free_node_res() - idr_for_each() callback: tear down one node
 * resource.  If the node was allocated and is not yet past NODE_DELETING,
 * a running/paused/terminating node is stopped first (call at original
 * line 134/135 -- presumably node_terminate; not visible here), then
 * node_delete() releases it.
 */
119 /* Release all Node resources and its context
120 * Actual Node De-Allocation */
121 static int drv_proc_free_node_res(int id, void *p, void *data)
123 struct process_context *ctxt = data;
125 struct node_res_object *node_res_obj = p;
128 if (node_res_obj->node_allocated) {
129 node_state = node_get_state(node_res_obj->hnode);
130 if (node_state <= NODE_DELETING) {
131 if ((node_state == NODE_RUNNING) ||
132 (node_state == NODE_PAUSED) ||
133 (node_state == NODE_TERMINATING))
135 (node_res_obj->hnode, &status);
137 node_delete(node_res_obj, ctxt);
/*
 * drv_remove_all_dmm_res_elements() - per-process cleanup: unmap every
 * entry on ctxt->dmm_map_list and un-reserve every entry on
 * ctxt->dmm_rsv_list, logging (but not aborting on) failures.
 */
144 /* Release all Mapped and Reserved DMM resources */
145 int drv_remove_all_dmm_res_elements(void *process_ctxt)
147 struct process_context *ctxt = (struct process_context *)process_ctxt;
149 struct dmm_map_object *temp_map, *map_obj;
150 struct dmm_rsv_object *temp_rsv, *rsv_obj;
152 /* Free DMM mapped memory resources */
153 list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
154 status = proc_un_map(ctxt->hprocessor,
155 (void *)map_obj->dsp_addr, ctxt);
/* NOTE(review): format string below ends in "0x%xn" -- the backslash is
 * missing, so a literal 'n' is printed instead of a newline; "0x%x\n"
 * was almost certainly intended. */
157 pr_err("%s: proc_un_map failed!"
158 " status = 0x%xn", __func__, status);
161 /* Free DMM reserved memory resources */
162 list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
163 status = proc_un_reserve_memory(ctxt->hprocessor, (void *)
164 rsv_obj->dsp_reserved_addr,
/* NOTE(review): same missing '\n' escape as above ("0x%xn"). */
167 pr_err("%s: proc_un_reserve_memory failed!"
168 " status = 0x%xn", __func__, status);
/*
 * drv_proc_node_update_status() - record whether the node backing this
 * resource element is currently allocated (consumed by
 * drv_proc_free_node_res() above).
 */
173 /* Update Node allocation status */
174 void drv_proc_node_update_status(void *node_resource, s32 status)
176 struct node_res_object *node_res_obj =
177 (struct node_res_object *)node_resource;
178 DBC_ASSERT(node_resource != NULL);
179 node_res_obj->node_allocated = status;
/*
 * drv_proc_node_update_heap_status() - record whether this node resource
 * element currently owns a heap allocation.
 */
182 /* Update Node Heap status */
183 void drv_proc_node_update_heap_status(void *node_resource, s32 status)
185 struct node_res_object *node_res_obj =
186 (struct node_res_object *)node_resource;
187 DBC_ASSERT(node_resource != NULL);
188 node_res_obj->heap_allocated = status;
/*
 * drv_remove_all_node_res_elements() - free every node resource held by a
 * dying process: walk the node idr with drv_proc_free_node_res(), then
 * destroy the idr itself.
 */
191 /* Release all Node resources and its context
192 * This is called from .bridge_release.
194 int drv_remove_all_node_res_elements(void *process_ctxt)
196 struct process_context *ctxt = process_ctxt;
198 idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt);
199 idr_destroy(ctxt->node_id);
/*
 * drv_proc_insert_strm_res_element() - allocate a strm_res_object for an
 * already-created stream and register it in the process context's stream
 * idr, retrying idr_get_new() once via idr_pre_get() on -EAGAIN.
 * Mirrors drv_insert_node_res_element() above.
 */
204 /* Allocate the STRM resource element
205 * This is called after the actual resource is allocated
207 int drv_proc_insert_strm_res_element(void *stream_obj,
208 void *strm_res, void *process_ctxt)
210 struct strm_res_object **pstrm_res =
211 (struct strm_res_object **)strm_res;
212 struct process_context *ctxt = (struct process_context *)process_ctxt;
216 *pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
217 if (*pstrm_res == NULL) {
222 (*pstrm_res)->hstream = stream_obj;
223 retval = idr_get_new(ctxt->stream_id, *pstrm_res,
225 if (retval == -EAGAIN) {
226 if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) {
227 pr_err("%s: OUT OF MEMORY\n", __func__);
232 retval = idr_get_new(ctxt->stream_id, *pstrm_res,
236 pr_err("%s: FAILED, IDR is FULL\n", __func__);
/*
 * drv_proc_free_strm_res() - idr_for_each() callback: release one stream.
 * Frees any stream buffers, reclaims whatever buffers are still queued on
 * the stream (count obtained via strm_get_info()), then closes it.
 * NOTE(review): the kmalloc() result check is on a line not visible in
 * this view -- confirm ap_buffer is NULL-checked before strm_free_buffer().
 */
244 static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
246 struct process_context *ctxt = process_ctxt;
247 struct strm_res_object *strm_res = p;
248 struct stream_info strm_info;
249 struct dsp_streaminfo user;
250 u8 **ap_buffer = NULL;
256 if (strm_res->num_bufs) {
257 ap_buffer = kmalloc((strm_res->num_bufs *
258 sizeof(u8 *)), GFP_KERNEL);
260 strm_free_buffer(strm_res,
/* Drain still-queued buffers before closing the stream. */
267 strm_info.user_strm = &user;
268 user.number_bufs_in_stream = 0;
269 strm_get_info(strm_res->hstream, &strm_info, sizeof(strm_info));
270 while (user.number_bufs_in_stream--)
271 strm_reclaim(strm_res->hstream, &buf_ptr, &ul_bytes,
272 (u32 *) &ul_buf_size, &dw_arg);
273 strm_close(strm_res, ctxt);
/*
 * drv_remove_all_strm_res_elements() - free every stream resource held by
 * a dying process: walk the stream idr with drv_proc_free_strm_res(),
 * then destroy the idr.  Parallel to drv_remove_all_node_res_elements().
 */
277 /* Release all Stream resources and its context
278 * This is called from .bridge_release.
280 int drv_remove_all_strm_res_elements(void *process_ctxt)
282 struct process_context *ctxt = process_ctxt;
284 idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt);
285 idr_destroy(ctxt->stream_id);
/*
 * drv_proc_update_strm_res() - record the number of buffers attached to a
 * stream resource element (read later by drv_proc_free_strm_res()).
 */
290 /* Updating the stream resource element */
291 int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources)
294 struct strm_res_object **strm_res =
295 (struct strm_res_object **)strm_resources;
297 (*strm_res)->num_bufs = num_bufs;
301 /* GPP PROCESS CLEANUP CODE END */
/*
 * drv_create() - allocate the singleton DRV object, create its device and
 * device-extension lists, and publish the object in the registry via
 * cfg_set_object().  On any allocation failure the partially built lists
 * are freed.
 * NOTE(review): pdrv_object is dereferenced right after kzalloc(); its
 * NULL check is on a line not visible in this view -- confirm.
 */
304 * ======== = drv_create ======== =
306 * DRV Object gets created only once during Driver Loading.
308 int drv_create(struct drv_object **drv_obj)
311 struct drv_object *pdrv_object = NULL;
313 DBC_REQUIRE(drv_obj != NULL);
314 DBC_REQUIRE(refs > 0);
316 pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
318 /* Create and Initialize List of device objects */
319 pdrv_object->dev_list = kzalloc(sizeof(struct lst_list),
321 if (pdrv_object->dev_list) {
322 /* Create and Initialize List of device Extension */
323 pdrv_object->dev_node_string =
324 kzalloc(sizeof(struct lst_list), GFP_KERNEL);
325 if (!(pdrv_object->dev_node_string)) {
328 INIT_LIST_HEAD(&pdrv_object->
329 dev_node_string->head);
330 INIT_LIST_HEAD(&pdrv_object->dev_list->head);
338 /* Store the DRV Object in the Registry */
/* NOTE(review): storing a kernel pointer as u32 is not 64-bit safe;
 * this pattern recurs throughout the file (cfg_get_object callers). */
340 status = cfg_set_object((u32) pdrv_object, REG_DRV_OBJECT);
342 *drv_obj = pdrv_object;
/* Failure path: undo the list allocations. */
344 kfree(pdrv_object->dev_list);
345 kfree(pdrv_object->dev_node_string);
346 /* Free the DRV Object */
350 DBC_ENSURE(status || pdrv_object);
/*
 * drv_exit() - drop one module reference (the decrement itself is on a
 * line not visible in this view; only the DBC pre/post conditions on the
 * refs counter are shown).
 */
355 * ======== drv_exit ========
357 * Discontinue usage of the DRV module.
361 DBC_REQUIRE(refs > 0);
365 DBC_ENSURE(refs >= 0);
/*
 * drv_destroy() - free the DRV object's lists and clear the registry slot
 * (cfg_set_object(0)).  Normally the lists are already gone by the time
 * this runs, per the comment below.
 */
369 * ======== = drv_destroy ======== =
371 * Invoked during bridge de-initialization
373 int drv_destroy(struct drv_object *driver_obj)
376 struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
378 DBC_REQUIRE(refs > 0);
379 DBC_REQUIRE(pdrv_object);
382 * Delete the List if it exists.Should not come here
383 * as the drv_remove_dev_object and the Last drv_request_resources
384 * removes the list if the lists are empty.
386 kfree(pdrv_object->dev_list);
387 kfree(pdrv_object->dev_node_string);
389 /* Update the DRV Object in Registry to be 0 */
390 (void)cfg_set_object(0, REG_DRV_OBJECT);
/*
 * drv_get_dev_object() - return the index'th device object by walking the
 * list with drv_get_first_dev_object()/drv_get_next_dev_object().
 */
396 * ======== drv_get_dev_object ========
398 * Given a index, returns a handle to DevObject from the list.
400 int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
401 struct dev_object **device_obj)
404 #ifdef CONFIG_TIDSPBRIDGE_DEBUG
405 /* used only for Assertions and debug messages */
406 struct drv_object *pdrv_obj = (struct drv_object *)hdrv_obj;
408 struct dev_object *dev_obj;
410 DBC_REQUIRE(pdrv_obj);
411 DBC_REQUIRE(device_obj != NULL);
/* NOTE(review): index is u32, so this assertion is always true. */
412 DBC_REQUIRE(index >= 0);
413 DBC_REQUIRE(refs > 0);
414 DBC_ASSERT(!(LST_IS_EMPTY(pdrv_obj->dev_list)));
416 dev_obj = (struct dev_object *)drv_get_first_dev_object();
417 for (i = 0; i < index; i++) {
419 (struct dev_object *)drv_get_next_dev_object((u32) dev_obj);
422 *device_obj = (struct dev_object *)dev_obj;
/*
 * drv_get_first_dev_object() - fetch the DRV object from the registry and
 * return the head of its device list as a u32 handle, or 0 if the list is
 * missing/empty or the registry lookup fails.
 */
432 * ======== drv_get_first_dev_object ========
434 * Retrieve the first Device Object handle from an internal linked list of
435 * of DEV_OBJECTs maintained by DRV.
437 u32 drv_get_first_dev_object(void)
439 u32 dw_dev_object = 0;
440 struct drv_object *pdrv_obj;
442 if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
443 if ((pdrv_obj->dev_list != NULL) &&
444 !LST_IS_EMPTY(pdrv_obj->dev_list))
445 dw_dev_object = (u32) lst_first(pdrv_obj->dev_list);
448 return dw_dev_object;
/*
 * drv_get_first_dev_extension() - like drv_get_first_dev_object(), but for
 * the dev_node_string (device-extension) list; returns 0 when unavailable.
 */
452 * ======== DRV_GetFirstDevNodeString ========
454 * Retrieve the first Device Extension from an internal linked list of
455 * of Pointer to dev_node Strings maintained by DRV.
457 u32 drv_get_first_dev_extension(void)
459 u32 dw_dev_extension = 0;
460 struct drv_object *pdrv_obj;
462 if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
464 if ((pdrv_obj->dev_node_string != NULL) &&
465 !LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
467 (u32) lst_first(pdrv_obj->dev_node_string);
471 return dw_dev_extension;
/*
 * drv_get_next_dev_object() - return the list successor of hdev_obj from
 * the DRV object's device list, or 0 at the end of the list / on lookup
 * failure.  hdev_obj must come from a prior drv_get_first/next call.
 */
475 * ======== drv_get_next_dev_object ========
477 * Retrieve the next Device Object handle from an internal linked list of
478 * of DEV_OBJECTs maintained by DRV, after having previously called
479 * drv_get_first_dev_object() and zero or more DRV_GetNext.
481 u32 drv_get_next_dev_object(u32 hdev_obj)
483 u32 dw_next_dev_object = 0;
484 struct drv_object *pdrv_obj;
486 DBC_REQUIRE(hdev_obj != 0);
488 if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
490 if ((pdrv_obj->dev_list != NULL) &&
491 !LST_IS_EMPTY(pdrv_obj->dev_list)) {
492 dw_next_dev_object = (u32) lst_next(pdrv_obj->dev_list,
497 return dw_next_dev_object;
/*
 * drv_get_next_dev_extension() - return the list successor of
 * dev_extension from the dev_node_string list, or 0 at the end / on
 * registry lookup failure.  Parallel to drv_get_next_dev_object().
 */
501 * ======== drv_get_next_dev_extension ========
503 * Retrieve the next Device Extension from an internal linked list of
504 * of pointer to DevNodeString maintained by DRV, after having previously
505 * called drv_get_first_dev_extension() and zero or more
506 * drv_get_next_dev_extension().
508 u32 drv_get_next_dev_extension(u32 dev_extension)
510 u32 dw_dev_extension = 0;
511 struct drv_object *pdrv_obj;
513 DBC_REQUIRE(dev_extension != 0);
515 if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
516 if ((pdrv_obj->dev_node_string != NULL) &&
517 !LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
519 (u32) lst_next(pdrv_obj->dev_node_string,
520 (struct list_head *)dev_extension);
524 return dw_dev_extension;
/*
 * drv_init() - take one module reference; ret is the success flag
 * returned to the caller (the increment itself is on a line not visible
 * in this view).
 */
528 * ======== drv_init ========
530 * Initialize DRV module private state.
534 s32 ret = 1; /* function return value */
536 DBC_REQUIRE(refs >= 0);
541 DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
/*
 * drv_insert_dev_object() - append a device object to the tail of the DRV
 * object's device list.  The dev_object is cast to a list_head, so its
 * link member is assumed to be first -- the usual pattern in this file.
 */
547 * ======== drv_insert_dev_object ========
549 * Insert a DevObject into the list of Manager object.
551 int drv_insert_dev_object(struct drv_object *driver_obj,
552 struct dev_object *hdev_obj)
554 struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
556 DBC_REQUIRE(refs > 0);
557 DBC_REQUIRE(hdev_obj != NULL);
558 DBC_REQUIRE(pdrv_object);
559 DBC_ASSERT(pdrv_object->dev_list);
561 lst_put_tail(pdrv_object->dev_list, (struct list_head *)hdev_obj);
563 DBC_ENSURE(!LST_IS_EMPTY(pdrv_object->dev_list));
/*
 * drv_remove_dev_object() - linear-search the DRV object's device list for
 * hdev_obj, unlink it if found, and free the list itself once it becomes
 * empty (dev_list is then reset to NULL so later callers can detect it).
 */
569 * ======== drv_remove_dev_object ========
571 * Search for and remove a DeviceObject from the given list of DRV
574 int drv_remove_dev_object(struct drv_object *driver_obj,
575 struct dev_object *hdev_obj)
578 struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
579 struct list_head *cur_elem;
581 DBC_REQUIRE(refs > 0);
582 DBC_REQUIRE(pdrv_object);
583 DBC_REQUIRE(hdev_obj != NULL);
585 DBC_REQUIRE(pdrv_object->dev_list != NULL);
586 DBC_REQUIRE(!LST_IS_EMPTY(pdrv_object->dev_list));
588 /* Search list for p_proc_object: */
589 for (cur_elem = lst_first(pdrv_object->dev_list); cur_elem != NULL;
590 cur_elem = lst_next(pdrv_object->dev_list, cur_elem)) {
591 /* If found, remove it. */
592 if ((struct dev_object *)cur_elem == hdev_obj) {
593 lst_remove_elem(pdrv_object->dev_list, cur_elem);
598 /* Remove list if empty. */
599 if (LST_IS_EMPTY(pdrv_object->dev_list)) {
600 kfree(pdrv_object->dev_list);
601 pdrv_object->dev_list = NULL;
603 DBC_ENSURE((pdrv_object->dev_list == NULL) ||
604 !LST_IS_EMPTY(pdrv_object->dev_list));
/*
 * drv_request_resources() - allocate a drv_ext, copy the dev_node string
 * passed in dw_context into it (bounded copy, explicitly NUL-terminated),
 * queue it on the DRV object's dev_node_string list and hand the string
 * address back through dev_node_strg.
 * NOTE(review): dw_context is a u32 cast to char * -- like the other
 * pointer/u32 round-trips in this file, this is not 64-bit safe.
 */
610 * ======== drv_request_resources ========
612 * Requests resources from the OS.
614 int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
617 struct drv_object *pdrv_object;
618 struct drv_ext *pszdev_node;
620 DBC_REQUIRE(dw_context != 0);
621 DBC_REQUIRE(dev_node_strg != NULL);
624 * Allocate memory to hold the string. This will live until
625 * it is freed in the Release resources. Update the driver object
629 status = cfg_get_object((u32 *) &pdrv_object, REG_DRV_OBJECT);
631 pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
633 lst_init_elem(&pszdev_node->link);
634 strncpy(pszdev_node->sz_string,
635 (char *)dw_context, MAXREGPATHLENGTH - 1);
636 pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
637 /* Update the Driver Object List */
638 *dev_node_strg = (u32) pszdev_node->sz_string;
639 lst_put_tail(pdrv_object->dev_node_string,
640 (struct list_head *)pszdev_node);
646 dev_dbg(bridge, "%s: Failed to get Driver Object from Registry",
651 DBC_ENSURE((!status && dev_node_strg != NULL &&
652 !LST_IS_EMPTY(pdrv_object->dev_node_string)) ||
653 (status && *dev_node_strg == 0));
/*
 * drv_release_resources() - walk the dev_node_string list, unlink and free
 * the entry whose address matches dw_context, and free the list itself
 * once it is empty (mirroring drv_remove_dev_object()).
 */
659 * ======== drv_release_resources ========
661 * Releases resources from the OS.
663 int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
666 struct drv_object *pdrv_object = (struct drv_object *)hdrv_obj;
667 struct drv_ext *pszdev_node;
670 * Irrespective of the status go ahead and clean it
671 * The following will over write the status.
673 for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
674 pszdev_node != NULL; pszdev_node = (struct drv_ext *)
675 drv_get_next_dev_extension((u32) pszdev_node)) {
676 if (!pdrv_object->dev_node_string) {
677 /* When this could happen? */
680 if ((u32) pszdev_node == dw_context) {
682 /* Delete from the Driver object list */
683 lst_remove_elem(pdrv_object->dev_node_string,
684 (struct list_head *)pszdev_node);
685 kfree((void *)pszdev_node);
688 /* Delete the List if it is empty */
689 if (LST_IS_EMPTY(pdrv_object->dev_node_string)) {
690 kfree(pdrv_object->dev_node_string);
691 pdrv_object->dev_node_string = NULL;
/*
 * request_bridge_resources() - fill in the fixed parts of a cfg_hostres:
 * map the system-control registers and seed the hard-coded channel/IRQ
 * parameters.  drv_request_bridge_res_dsp() below overwrites/extends most
 * of these fields afterwards.
 */
698 * ======== request_bridge_resources ========
700 * Reserves shared memory for bridge.
702 static int request_bridge_resources(struct cfg_hostres *res)
704 struct cfg_hostres *host_res = res;
706 /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
707 host_res->num_mem_windows = 2;
709 /* First window is for DSP internal memory */
710 host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE);
711 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
712 dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);
713 dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
715 /* for 24xx base port is not mapping the memory for DSP
716 * internal memory TODO Do a ioremap here */
717 /* Second window is for DSP external memory shared with MPU */
719 /* These are hard-coded values */
720 host_res->birq_registers = 0;
721 host_res->birq_attrib = 0;
722 host_res->dw_offset_for_monitor = 0;
723 host_res->dw_chnl_offset = 0;
724 /* CHNL_MAXCHANNELS */
725 host_res->dw_num_chnls = CHNL_MAXCHANNELS;
726 host_res->dw_chnl_buf_size = 0x400;
/*
 * drv_request_bridge_res_dsp() - allocate and populate the cfg_hostres for
 * the DSP: ioremap the DSP memory windows and PRM/CM/DMMU register ranges,
 * allocate the physically contiguous shared-memory segment (window 1) via
 * mem_alloc_phys_mem(), fill in the hard-coded channel parameters, and
 * return the structure through phost_resources.
 * NOTE(review): ioremap() returns are cast to u32 (not 64-bit safe), and
 * drv_datap from dev_get_drvdata() is dereferenced for shm_size -- its
 * NULL check is not visible in this view; confirm.
 */
732 * ======== drv_request_bridge_res_dsp ========
734 * Reserves shared memory for bridge.
736 int drv_request_bridge_res_dsp(void **phost_resources)
739 struct cfg_hostres *host_res;
743 struct drv_data *drv_datap = dev_get_drvdata(bridge);
745 dw_buff_size = sizeof(struct cfg_hostres);
747 host_res = kzalloc(dw_buff_size, GFP_KERNEL);
749 if (host_res != NULL) {
750 request_bridge_resources(host_res);
751 /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
752 host_res->num_mem_windows = 4;
/* Windows 0 and 2-4: DSP memory ranges (window 1 is the shm below). */
754 host_res->dw_mem_base[0] = 0;
755 host_res->dw_mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
757 host_res->dw_mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
759 host_res->dw_mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
761 host_res->dw_per_base = ioremap(OMAP_PER_CM_BASE,
763 host_res->dw_per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
765 host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
767 host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE,
770 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n",
771 host_res->dw_mem_base[0]);
772 dev_dbg(bridge, "dw_mem_base[1] 0x%x\n",
773 host_res->dw_mem_base[1]);
774 dev_dbg(bridge, "dw_mem_base[2] 0x%x\n",
775 host_res->dw_mem_base[2]);
776 dev_dbg(bridge, "dw_mem_base[3] 0x%x\n",
777 host_res->dw_mem_base[3]);
778 dev_dbg(bridge, "dw_mem_base[4] 0x%x\n",
779 host_res->dw_mem_base[4]);
780 dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
782 shm_size = drv_datap->shm_size;
783 if (shm_size >= 0x10000) {
784 /* Allocate Physically contiguous,
785 * non-cacheable memory */
786 host_res->dw_mem_base[1] =
787 (u32) mem_alloc_phys_mem(shm_size, 0x100000,
789 if (host_res->dw_mem_base[1] == 0) {
791 pr_err("shm reservation Failed\n");
793 host_res->dw_mem_length[1] = shm_size;
794 host_res->dw_mem_phys[1] = dma_addr;
796 dev_dbg(bridge, "%s: Bridge shm address 0x%x "
797 "dma_addr %x size %x\n", __func__,
798 host_res->dw_mem_base[1],
803 /* These are hard-coded values */
804 host_res->birq_registers = 0;
805 host_res->birq_attrib = 0;
806 host_res->dw_offset_for_monitor = 0;
807 host_res->dw_chnl_offset = 0;
808 /* CHNL_MAXCHANNELS */
809 host_res->dw_num_chnls = CHNL_MAXCHANNELS;
810 host_res->dw_chnl_buf_size = 0x400;
811 dw_buff_size = sizeof(struct cfg_hostres);
813 *phost_resources = host_res;
/*
 * mem_ext_phys_pool_init() - ioremap an externally reserved physical
 * memory range and arm the bump-pointer allocator state in ext_mem_pool.
 * On mapping failure the pool is simply left disabled.
 * NOTE(review): the "(void **)pool_virt_base == NULL" test is a u32 cast
 * to a pointer just to compare against NULL -- "pool_virt_base == 0" is
 * what is actually being checked.
 */
819 void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size)
823 /* get the virtual address for the physical memory pool passed */
824 pool_virt_base = (u32) ioremap(pool_phys_base, pool_size);
826 if ((void **)pool_virt_base == NULL) {
827 pr_err("%s: external physical memory map failed\n", __func__);
828 ext_phys_mem_pool_enabled = false;
830 ext_mem_pool.phys_mem_base = pool_phys_base;
831 ext_mem_pool.phys_mem_size = pool_size;
832 ext_mem_pool.virt_mem_base = pool_virt_base;
833 ext_mem_pool.next_phys_alloc_ptr = pool_phys_base;
834 ext_phys_mem_pool_enabled = true;
/*
 * mem_ext_phys_pool_release() - unmap the external physical memory pool
 * and disable it; safe to call when the pool was never enabled.
 */
838 void mem_ext_phys_pool_release(void)
840 if (ext_phys_mem_pool_enabled) {
841 iounmap((void *)(ext_mem_pool.virt_mem_base));
842 ext_phys_mem_pool_enabled = false;
/*
 * mem_ext_phys_mem_alloc() - bump-pointer allocation from the external
 * physical pool: align next_phys_alloc_ptr up to 'align' (align is
 * assumed to be a power of two, per the mask arithmetic), check the
 * request fits in the remaining pool, and return the virtual address
 * computed from the pool's virt/phys offset.  The pool is never freed
 * piecemeal -- allocations persist until mem_ext_phys_pool_release().
 */
847 * ======== mem_ext_phys_mem_alloc ========
849 * Allocate physically contiguous, uncached memory from external memory pool
852 static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 * phys_addr)
861 if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
862 - ext_mem_pool.next_phys_alloc_ptr)) {
/* Align the allocation pointer; zero offset means already aligned. */
866 offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
868 new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
870 new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
872 if ((new_alloc_ptr + bytes) <=
873 (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
874 /* we can allocate */
875 *phys_addr = new_alloc_ptr;
876 ext_mem_pool.next_phys_alloc_ptr =
877 new_alloc_ptr + bytes;
879 ext_mem_pool.virt_mem_base + (new_alloc_ptr -
882 return (void *)virt_addr;
/*
 * mem_alloc_phys_mem() - front end for physically contiguous, uncached
 * allocations: uses the external pool when enabled, otherwise
 * dma_alloc_coherent(); writes the physical address (or 0 on failure)
 * through physical_address.
 * NOTE(review): dma_alloc_coherent() is called with a NULL struct device,
 * which modern kernels reject -- flagged, not changed here.
 */
891 * ======== mem_alloc_phys_mem ========
893 * Allocate physically contiguous, uncached memory
895 void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
896 u32 *physical_address)
902 if (ext_phys_mem_pool_enabled) {
903 va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask,
906 va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
909 *physical_address = 0;
911 *physical_address = pa_mem;
/*
 * mem_free_phys_mem() - release memory obtained from mem_alloc_phys_mem().
 * External-pool allocations are deliberately not freed (bump allocator);
 * only dma_alloc_coherent() memory goes back via dma_free_coherent().
 * NOTE(review): same NULL-device dma_* usage as mem_alloc_phys_mem().
 */
917 * ======== mem_free_phys_mem ========
919 * Free the given block of physically contiguous memory.
921 void mem_free_phys_mem(void *virtual_address, u32 physical_address,
924 DBC_REQUIRE(virtual_address != NULL);
926 if (!ext_phys_mem_pool_enabled)
927 dma_free_coherent(NULL, byte_size, virtual_address,