/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include "radeon_reg.h"
#include "radeon.h"

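/*
 * mmap offsets below DRM_FILE_PAGE_OFFSET are serviced by the legacy
 * DRM mapping code; offsets at or above it address TTM buffer objects
 * (see radeon_mmap() below).
 */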
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);

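/*
 * Recover the radeon_device from an embedded ttm_bo_device by walking
 * back through the radeon_mman wrapper with container_of().
 */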
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}

/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

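/*
 * Take one reference each on TTM's global memory accounting object and
 * its global BO state.  The BO global depends on the memory global, so
 * it is set up second here and dropped first in radeon_ttm_global_fini().
 */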
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct ttm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = TTM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);

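/*
 * AGP boards use TTM's generic AGP backend for GTT; everything else
 * uses the GART-based backend defined at the bottom of this file.
 */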
static struct ttm_backend *
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
	} else
#endif
	{
		return radeon_ttm_backend_create(rdev);
	}
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

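/*
 * Describe the three placement domains to TTM: cacheable system pages,
 * GTT (CPU-visible through the AGP aperture when one is present), and
 * on-card VRAM behind the PCI aperture.
 */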
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->gpu_offset = 0;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			man->io_offset = rdev->mc.agp_base;
			man->io_size = rdev->mc.gtt_size;
			man->io_addr = NULL;
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
					     TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else
#endif
		{
			man->io_offset = 0;
			man->io_size = 0;
			man->io_addr = NULL;
		}
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		man->io_addr = NULL;
		man->io_offset = rdev->mc.aper_base;
		man->io_size = rdev->mc.aper_size;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

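/*
 * Evict everything to cached system memory; only the memory type and
 * caching bits of the current placement are replaced.
 */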
static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
{
	uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;

	switch (bo->mem.mem_type) {
	default:
		return (cur_placement & ~TTM_PL_MASK_CACHING) |
			TTM_PL_FLAG_SYSTEM |
			TTM_PL_FLAG_CACHED;
	}
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

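/*
 * "Move" a buffer without touching its backing store: the old and new
 * placements are compatible, so adopting the new mem reg is enough.
 */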
static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

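/*
 * Translate both placements into GPU MC addresses and have the CP blit
 * the pages; ttm_bo_move_accel_cleanup() then uses the fence to order
 * the copy against later CPU access.
 */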
static int radeon_move_blit(struct ttm_buffer_object *bo,
			    bool evict, int no_wait,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	r = radeon_fence_create(rdev, &fence);
	if (unlikely(r)) {
		return r;
	}
	old_start = old_mem->mm_node->start << PAGE_SHIFT;
	new_start = new_mem->mm_node->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->cp.ready) {
		DRM_ERROR("Trying to move memory with CP turned off.\n");
		return -EINVAL;
	}
	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
	/* FIXME: handle copy error */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
				      evict, no_wait, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

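/*
 * VRAM -> system moves are staged: grab a temporary GTT placement, bind
 * the TTM pages to it, blit VRAM into GTT, then let ttm_bo_move_ttm()
 * finish the hop into system memory.
 */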
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible, bool no_wait,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	uint32_t proposed_placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
			     interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
		return r;
	}
	return r;
}

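/*
 * System -> VRAM is the mirror image: move into a temporary GTT
 * placement first, then blit from GTT into VRAM.
 */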
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible, bool no_wait,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	uint32_t proposed_flags;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
			     interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
		return r;
	}
	return r;
}

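/*
 * Top-level TTM move hook.  Cheap cases first: a NULL ttm in system
 * memory or a TT<->system transition needs no data copy.  Otherwise
 * blit with the GPU, falling back to ttm_bo_move_memcpy() when the CP
 * is down, the ASIC lacks a copy hook, or the blit path fails.
 */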
static int radeon_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible, bool no_wait,
			  struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->cp.ready || rdev->asic->copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					 no_wait, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					 no_wait, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}

	return r;
}

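/*
 * Placement priority lists handed to TTM, most preferred type first;
 * the busy list is consulted when the preferred placement is contended.
 */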
const uint32_t radeon_mem_prios[] = {
	TTM_PL_VRAM,
	TTM_PL_TT,
	TTM_PL_SYSTEM,
};

const uint32_t radeon_busy_prios[] = {
	TTM_PL_TT,
	TTM_PL_VRAM,
	TTM_PL_SYSTEM,
};

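/*
 * TTM sync_obj hooks: thin wrappers that cast the opaque sync object
 * to a radeon fence.
 */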
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
				bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}

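/*
 * TTM driver vtable wiring the callbacks above (plus the move/fault
 * notify hooks implemented elsewhere in the driver) into the core.
 */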
static struct ttm_bo_driver radeon_bo_driver = {
	.mem_type_prio = radeon_mem_prios,
	.mem_busy_prio = radeon_busy_prios,
	.num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
	.num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
};

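/*
 * Bring up TTM for this device: global state, the BO device, the VRAM
 * and GTT heaps, a pinned 256KB VRAM object reserved for VGA stolen
 * memory, and the debugfs files.
 */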
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other user of the address space, so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
			   rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = radeon_object_create(rdev, NULL, 256 * 1024, true,
				 RADEON_GEM_DOMAIN_VRAM, false,
				 &rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	if (r) {
		radeon_object_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
			   rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

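/*
 * Teardown: unpin and release the stolen-VGA object, then shut down the
 * VRAM and GTT heaps, the BO device, the GART, and the TTM globals.
 */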
void radeon_ttm_fini(struct radeon_device *rdev)
{
	if (rdev->stollen_vga_memory) {
		radeon_object_unpin(rdev->stollen_vga_memory);
		radeon_object_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	DRM_INFO("radeon: ttm finalized\n");
}

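/*
 * Page-fault handling: radeon wraps TTM's fault handler so a VMA whose
 * buffer object has gone away fails cleanly with VM_FAULT_NOPAGE; all
 * other faults are forwarded to the TTM vm_ops captured in radeon_mmap().
 */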
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	r = ttm_vm_ops->fault(vma, vmf);
	return r;
}

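/*
 * mmap entry point.  Offsets below DRM_FILE_PAGE_OFFSET belong to the
 * legacy DRM maps; everything above is a TTM object.  The first
 * successful TTM mmap captures TTM's vm_ops template so the fault
 * handler can be interposed once for all subsequent mappings.
 */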
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = (struct drm_file *)filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_backend {
	struct ttm_backend backend;
	struct radeon_device *rdev;
	unsigned long num_pages;
	struct page **pages;
	struct page *dummy_read_page;
	bool populated;
	bool bound;
	unsigned offset;
};

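/*
 * populate/clear just cache the page array handed over by TTM; the
 * pages are not touched until bind time.
 */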
static int radeon_ttm_backend_populate(struct ttm_backend *backend,
				       unsigned long num_pages,
				       struct page **pages,
				       struct page *dummy_read_page)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = pages;
	gtt->num_pages = num_pages;
	gtt->dummy_read_page = dummy_read_page;
	gtt->populated = true;
	return 0;
}

static void radeon_ttm_backend_clear(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
}

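/*
 * Enter the cached pages into the GART at the offset chosen by TTM's
 * GTT manager.
 */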
static int radeon_ttm_backend_bind(struct ttm_backend *backend,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_backend *gtt;
	int r;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
	if (!gtt->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     gtt->num_pages, bo_mem, backend);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     gtt->num_pages, gtt->pages);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  gtt->num_pages, gtt->offset);
		return r;
	}
	gtt->bound = true;
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
	gtt->bound = false;
	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	if (gtt->bound) {
		radeon_ttm_backend_unbind(backend);
	}
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.populate = &radeon_ttm_backend_populate,
	.clear = &radeon_ttm_backend_clear,
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

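/*
 * Allocate a GART-backed TTM backend (used whenever AGP is absent).
 */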
struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
{
	struct radeon_ttm_backend *gtt;

	gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->backend.bdev = &rdev->mman.bdev;
	gtt->backend.flags = 0;
	gtt->backend.func = &radeon_backend_func;
	gtt->rdev = rdev;
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
	return &gtt->backend;
}

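/* One debugfs file for each drm_mm manager: VRAM and GTT. */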
#define RADEON_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ret;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}
#endif

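/*
 * Register one debugfs file per memory type that dumps the
 * corresponding drm_mm allocator state under the LRU lock.
 */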
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
	unsigned i;

	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
		else
			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
		radeon_mem_types_list[i].driver_features = 0;
		if (i == 0)
			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
		else
			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
	}
	return radeon_debugfs_add_files(rdev, radeon_mem_types_list,
					RADEON_DEBUGFS_MEM_TYPES);
#endif
	return 0;
}