/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
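/*
 * Illustrative usage sketch (not compiled; the vram_mm/hole/block names and
 * sizes are examples only). Allocation is two-step: search for a large
 * enough free hole, then carve the block out of it; drm_mm_put_block()
 * returns the range, merging it with free neighbors:
 *
 *	struct drm_mm vram_mm;
 *	struct drm_mm_node *hole, *block;
 *
 *	drm_mm_init(&vram_mm, 0, 1024 * 1024);
 *	hole = drm_mm_search_free(&vram_mm, 4096, 0, 0);
 *	if (hole)
 *		block = drm_mm_get_block_generic(hole, 4096, 0, 0);
 *	...
 *	drm_mm_put_block(block);
 *	drm_mm_takedown(&vram_mm);
 */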
#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kmalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		/* Allocation failed: fall back to the pre-allocated cache. */
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child = list_entry(mm->unused_nodes.next,
					   struct drm_mm_node, free_stack);
			list_del(&child->free_stack);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}
/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		/* Drop the lock around the sleeping allocation. */
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->free_stack, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
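/*
 * Sketch of the intended drm_mm_pre_get() call pattern (illustrative only;
 * example_lock is a hypothetical caller-held spinlock): top up the node
 * cache while sleeping is still allowed, then allocate atomically under the
 * lock, where drm_mm_kmalloc() falls back to the cache if GFP_ATOMIC fails:
 *
 *	drm_mm_pre_get(mm);
 *	spin_lock(&example_lock);
 *	node = drm_mm_search_free(mm, size, alignment, 0);
 *	if (node)
 *		node = drm_mm_get_block_generic(node, size, alignment, 1);
 *	spin_unlock(&example_lock);
 */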
static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->node_list, &mm->node_list);
	list_add_tail(&child->free_stack, &mm->free_stack);

	return 0;
}
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->free_stack);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->node_list, &parent->node_list);

	/* The remainder of the hole follows the new block. */
	parent->size -= size;
	parent->start += size;
	return child;
}
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		/* Split off the head of the hole up to the aligned start. */
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;
	unsigned wasted = 0;

	if (node->start < start)
		wasted += start - node->start;
	if (alignment)
		tmp = ((node->start + wasted) % alignment);

	if (tmp)
		wasted += alignment - tmp;
	if (wasted) {
		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
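/*
 * Worked example for the wasted-space computation above (hypothetical
 * numbers): for a hole at start 0x1000, a range starting at 0x1800 and
 * alignment 0x400, wasted = 0x1800 - 0x1000 = 0x800 and tmp = 0, so the
 * block is carved out at 0x1800 and the 0x800-byte split-off in front of
 * it is immediately returned to the free stack via drm_mm_put_block().
 */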
/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */
void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->node_list;
	struct list_head *root_head = &mm->node_list;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, node_list);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, node_list);
		if (next_node->free) {
			if (merged) {
				/* Both neighbors free: fold next into prev. */
				prev_node->size += next_node->size;
				list_del(&next_node->node_list);
				list_del(&next_node->free_stack);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->free_stack,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->free_stack, &mm->free_stack);
	} else {
		/* cur was absorbed by a neighbor: recycle or free it. */
		list_del(&cur->node_list);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->free_stack, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);
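/*
 * The coalescing outcomes above, sketched on a node list where [F] is free
 * and [U] is used:
 *
 *	[F][cur][U]  ->  prev absorbs cur:           [F....][U]
 *	[U][cur][F]  ->  next absorbs cur:           [U][F....]
 *	[F][cur][F]  ->  prev absorbs cur and next:  [F........]
 *
 * Only when both neighbors are in use does cur itself go back on the free
 * stack; nodes that were merged away are recycled via unused_nodes (up to
 * MM_UNUSED_TARGET) or kfree()d.
 */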
static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size,
			      unsigned alignment)
{
	unsigned wasted = 0;

	if (entry->size < size)
		return 0;

	if (alignment) {
		unsigned tmp = entry->start % alignment;
		if (tmp)
			wasted = alignment - tmp;
	}

	/* The hole fits only if it also covers the alignment padding. */
	return entry->size >= size + wasted;
}
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		if (!check_free_mm_node(entry, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
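/*
 * Worked example for the best_match flag (hypothetical hole sizes): with
 * free holes of 16K, 8K and 32K encountered in that order and a 4K request,
 * best_match == 0 returns the 16K hole (first fit, cheap), while
 * best_match == 1 walks the whole stack and returns the 8K hole (best fit,
 * which reduces external fragmentation at the cost of a full list scan).
 */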
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		/* Skip holes entirely outside the requested range. */
		if (entry->start > end || (entry->start + entry->size) < start)
			continue;

		if (!check_free_mm_node(entry, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->node_list;

	/* Clean means only the single initial free node is left. */
	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->node_list);
	INIT_LIST_HEAD(&mm->free_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	/* The whole managed range starts out as one free node. */
	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->free_stack.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, free_stack);

	/* Anything beyond the single initial node means leaked blocks. */
	if (entry->node_list.next != &mm->node_list ||
	    entry->free_stack.next != &mm->free_stack) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->free_stack);
	list_del(&entry->node_list);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
		list_del(&entry->free_stack);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: %s\n",
		       prefix, entry->start, entry->start + entry->size,
		       entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
	       total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %lu, used %lu free %lu\n",
		   total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif