drivers/gpu/drm/drm_mm.c (net-next-2.6.git) - drm_mm: extract check_free_mm_node

/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
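
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller initializes the manager over its address range once, then finds a
 * large-enough free hole, carves an allocation out of it and later returns
 * it. The identifiers "my_mm", "MY_APERTURE_SIZE", "hole" and "node" below
 * are hypothetical placeholders.
 *
 *	struct drm_mm my_mm;
 *	struct drm_mm_node *hole, *node;
 *
 *	drm_mm_init(&my_mm, 0, MY_APERTURE_SIZE);
 *
 *	hole = drm_mm_search_free(&my_mm, 4096, 4096, 0);
 *	if (hole)
 *		node = drm_mm_get_block_generic(hole, 4096, 4096, 0);
 *
 *	...
 *
 *	drm_mm_put_block(node);
 *	drm_mm_takedown(&my_mm);
 */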

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4

/*
 * Allocate a struct drm_mm_node. If kmalloc fails, fall back to a node from
 * the pre-allocated unused_nodes cache filled by drm_mm_pre_get().
 */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kmalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, free_stack);
			list_del(&child->free_stack);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm:	memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->free_stack, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
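
/*
 * Illustrative sketch (not part of the original file): drm_mm_pre_get() lets
 * a caller top up the unused_nodes cache with GFP_KERNEL allocations before
 * entering a context where it may not sleep, so the atomic variants can
 * still obtain a node even if GFP_ATOMIC allocation fails. The identifiers
 * "dev_priv", "hole", "size" and "align" below are hypothetical placeholders.
 *
 *	ret = drm_mm_pre_get(&dev_priv->mm);
 *	if (ret)
 *		return ret;
 *
 *	spin_lock(&dev_priv->lock);
 *	hole = drm_mm_search_free(&dev_priv->mm, size, align, 0);
 *	if (hole)
 *		hole = drm_mm_get_block_generic(hole, size, align, 1);
 *	spin_unlock(&dev_priv->lock);
 */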

static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->node_list, &mm->node_list);
	list_add_tail(&child->free_stack, &mm->free_stack);

	return 0;
}

static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	/* The new node is allocated (not free), so it is not on the free stack. */
	INIT_LIST_HEAD(&child->free_stack);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->node_list, &parent->node_list);

	parent->size -= size;
	parent->start += size;
	return child;
}


struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
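
/*
 * Worked example of the alignment handling above (the numbers are
 * hypothetical): if the free node starts at 0x1003 and alignment is 0x1000,
 * then tmp = 0x1003 % 0x1000 = 3, so a splinter of 0x1000 - 3 = 0xffd bytes
 * is split off first. The remaining node then starts at 0x1003 + 0xffd =
 * 0x2000, which is properly aligned; the splinter is immediately returned to
 * the free stack via drm_mm_put_block().
 */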

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;
	unsigned wasted = 0;

	if (node->start < start)
		wasted += start - node->start;
	if (alignment)
		tmp = ((node->start + wasted) % alignment);

	if (tmp)
		wasted += alignment - tmp;
	if (wasted) {
		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->node_list;
	struct list_head *root_head = &mm->node_list;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, node_list);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, node_list);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->node_list);
				list_del(&next_node->free_stack);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->free_stack,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->free_stack, &mm->free_stack);
	} else {
		list_del(&cur->node_list);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->free_stack, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);
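
/*
 * Illustrative sketch (not part of the original file) of the coalescing
 * above. Suppose the node list holds three adjacent allocated blocks A, B
 * and C. Freeing B with drm_mm_put_block() simply marks B free and pushes it
 * on the free stack (it has no free neighbours). Freeing A afterwards merges
 * A into B: B's start moves down to A's start, B's size grows by A's size,
 * and A's node is returned to the unused_nodes cache (or kfree()d). If C is
 * freed as well, all three collapse back into a single free region.
 */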

/*
 * Does this free node have room for a block of 'size' bytes at the
 * requested alignment, once any alignment splinter is accounted for?
 */
static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size,
			      unsigned alignment)
{
	unsigned wasted = 0;

	if (entry->size < size)
		return 0;

	if (alignment) {
		unsigned tmp = entry->start % alignment;
		if (tmp)
			wasted = alignment - tmp;
	}

	if (entry->size >= size + wasted)
		return 1;

	return 0;
}

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		if (!check_free_mm_node(entry, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		if (entry->start > end || (entry->start + entry->size) < start)
			continue;

		if (!check_free_mm_node(entry, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->node_list);
	INIT_LIST_HEAD(&mm->free_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->free_stack.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, free_stack);

	if (entry->node_list.next != &mm->node_list ||
	    entry->free_stack.next != &mm->free_stack) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->free_stack);
	list_del(&entry->node_list);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
		list_del(&entry->free_stack);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: %s\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %lu, used %lu free %lu\n",
		   total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif
457 #endif