/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple: the free list is currently an
 * unordered stack of free regions, so there may be substantial performance
 * gains from a smarter structure such as an RB-tree, at least under heavy
 * fragmentation. Aligned allocations could also be handled more efficiently.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
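
/*
 * A minimal usage sketch (illustrative only; the names "mm" and "node" are
 * hypothetical and error handling is abbreviated). A driver embeds a
 * struct drm_mm, initializes it over a range of offsets, and carves
 * allocations out of it; drm_mm_get_block() here is assumed to be the
 * non-atomic wrapper from drm_mm.h around drm_mm_get_block_generic():
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *node;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);	manage offsets 0..1M
 *
 *	node = drm_mm_search_free(&mm, 4096, 0, 0);
 *	if (node != NULL)
 *		node = drm_mm_get_block(node, 4096, 0);
 *
 *	drm_mm_put_block(node);			free and merge back
 *	drm_mm_takedown(&mm);
 */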

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4

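/*
 * Return the size of the free block at the very end of the managed range,
 * or zero if the tail block is currently allocated.
 */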
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}

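/*
 * Shrink the managed range by taking 'size' units off its tail. Fails
 * unless the tail block is free and strictly larger than 'size'.
 */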
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}

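/*
 * Allocate a node, falling back to the pre-filled unused_nodes pool when
 * kmalloc() fails; the pool is what lets atomic callers make progress when
 * GFP_ATOMIC allocations fail (see drm_mm_pre_get() below).
 */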
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kmalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child = list_entry(mm->unused_nodes.next,
					   struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

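/*
 * Top up the unused_nodes pool to MM_UNUSED_TARGET entries so that a later
 * allocation from atomic context can be served even if kmalloc(GFP_ATOMIC)
 * fails. A sketch of the intended calling pattern, assuming the
 * drm_mm_get_block_atomic() wrapper from drm_mm.h and a hypothetical
 * driver_lock:
 *
 *	drm_mm_pre_get(mm);			may sleep, fills the pool
 *	spin_lock(&driver_lock);
 *	node = drm_mm_search_free(mm, size, alignment, 0);
 *	if (node != NULL)
 *		node = drm_mm_get_block_atomic(node, size, alignment);
 *	spin_unlock(&driver_lock);
 */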
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);

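/*
 * Append a new free block of 'size' units starting at 'start' to the end
 * of the managed range; used at init time and when growing the range.
 */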
static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}

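/*
 * Grow the managed range by 'size' units: extend the tail block in place
 * if it is free, otherwise append a new free block after it.
 */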
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}

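/*
 * Split 'size' units off the front of a free block and return them as a
 * new, allocated node; the parent keeps the remainder and stays on the
 * free list.
 */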
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}

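/*
 * Allocate 'size' units out of a free block previously returned by
 * drm_mm_search_free(). Alignment is handled by splitting off the
 * misaligned head first: e.g. for a free block starting at 3 with
 * alignment 4, a one-unit block is split off so the allocation starts
 * at 4, and that head is put straight back on the free list below.
 */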
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		list_del(&cur->ml_entry);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);
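
/*
 * Scan the free stack for a block that can hold 'size' units at the given
 * alignment. With best_match == 0 the first fit is returned; with
 * best_match != 0 the whole stack is walked and the smallest fitting block
 * wins, trading a full list walk for less fragmentation.
 */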
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
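
/*
 * Returns nonzero when the manager is back to its initial state, i.e. the
 * memory-block list holds exactly one node (head->next->next wraps around
 * to the head again).
 */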
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
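
/*
 * Initialize a manager over the range [start, start + size) by creating a
 * single free block covering the whole range.
 */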
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
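
/*
 * Tear down a manager. Refuses with an error if anything is still
 * allocated; otherwise frees the one remaining node and drains the
 * unused-node pool.
 */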
void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %lu, used %lu free %lu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif