/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include "radeon_reg.h"
#include "radeon.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);

static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}

/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

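/*
 * Take per-device references on the global TTM memory-accounting and
 * buffer-object state; TTM initializes each on first use.  The BO global
 * state depends on the memory glob, so it is referenced second here and
 * released first in radeon_ttm_global_fini().
 */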
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct ttm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = TTM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}

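/*
 * TTM asks the driver for a backend to bind pages with: use the AGP
 * aperture on AGP boards, otherwise the driver's own GART backend
 * defined at the bottom of this file.
 */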
struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);

static struct ttm_backend *
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
	} else
#endif
	{
		return radeon_ttm_backend_create(rdev);
	}
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

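/*
 * Describe each memory pool to TTM: placement and caching attributes
 * plus, for TT and VRAM, the GPU offset of the pool and the PCI
 * aperture (io_offset/io_size) through which the CPU can reach it.
 */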
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			man->io_offset = rdev->mc.agp_base;
			man->io_size = rdev->mc.gtt_size;
			man->io_addr = NULL;
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else
#endif
		{
			man->io_offset = 0;
			man->io_size = 0;
			man->io_addr = NULL;
		}
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		man->io_addr = NULL;
		man->io_offset = rdev->mc.aper_base;
		man->io_size = rdev->mc.aper_size;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

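/*
 * Pick where an object should be evicted to: VRAM objects go to GTT
 * while the CP is running (so the move can be blitted) and straight to
 * system memory otherwise; everything else goes to system memory.
 * Objects that are not radeon BOs get a plain cached system placement.
 */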
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct radeon_bo *rbo;
	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->fpfn = 0;
		placement->lpfn = 0;
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (!rbo->rdev->cp.ready)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

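/*
 * verify_access lets every mapper through; radeon objects are only
 * reachable through per-file GEM handles anyway.  radeon_move_null()
 * covers moves where the backing pages are unchanged and only the
 * bookkeeping in bo->mem needs updating.
 */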
static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

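/*
 * Move a BO with the GPU: translate both placements into linear MC
 * addresses, kick off an asic copy, and hand the fence to TTM so the
 * old placement is torn down only once the blit has completed.
 */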
static int radeon_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_reserve, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	r = radeon_fence_create(rdev, &fence);
	if (unlikely(r)) {
		return r;
	}
	old_start = old_mem->mm_node->start << PAGE_SHIFT;
	new_start = new_mem->mm_node->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->cp.ready) {
		DRM_ERROR("Trying to move memory with CP turned off.\n");
		return -EINVAL;
	}
	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
	/* FIXME: handle copy error */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
				      evict, no_wait_reserve, no_wait_gpu, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

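/*
 * VRAM -> system moves go through a temporary GTT placement: blit the
 * contents from VRAM into GART-bound pages, then let ttm_bo_move_ttm()
 * unbind them into plain system memory.
 */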
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_reserve, bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	u32 placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_reserve, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
		return r;
	}
	return r;
}

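/*
 * System -> VRAM moves mirror radeon_move_vram_ram(): bind the pages
 * into a temporary GTT placement first, then blit from GTT into VRAM.
 */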
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_reserve, bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	u32 placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
		return r;
	}
	return r;
}

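/*
 * Top-level move hook.  Cheap cases (no backing store yet, or a pure
 * bind/unbind between TT and system memory) avoid copying entirely;
 * the rest are blitted by the GPU, falling back to ttm_bo_move_memcpy()
 * when the CP is down or the asic lacks a copy callback.
 */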
static int radeon_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible,
			  bool no_wait_reserve, bool no_wait_gpu,
			  struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->cp.ready || rdev->asic->copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					 no_wait_reserve, no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					 no_wait_reserve, no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
	}
	return r;
}

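/*
 * Back a ttm_mem_reg with a bus address for TTM's fault path: system
 * memory needs no setup, AGP space maps through the AGP base, and VRAM
 * maps through the PCI aperture provided the object lies within the
 * CPU-visible part of VRAM.
 */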
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

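/*
 * TTM's opaque sync objects are radeon fences here; the wrappers below
 * just cast and forward to the radeon_fence API.
 */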
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
				bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}

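/* The vtable that wires all of the above into TTM. */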
static struct ttm_bo_driver radeon_bo_driver = {
	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
	.io_mem_free = &radeon_ttm_io_mem_free,
};

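/*
 * Bring up TTM for this device: global state, the BO device, the VRAM
 * and GTT pools, a small buffer pinned in VRAM on behalf of stolen VGA
 * memory, and the debugfs files.
 */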
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other users of this address space, so set it to 0. */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
			   rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
			     RADEON_GEM_DOMAIN_VRAM,
			     &rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stollen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned)(rdev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
			   rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	if (rdev->stollen_vga_memory) {
		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

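/*
 * mmap handling: ttm_bo_mmap() sets up the mapping, after which the
 * vm_ops are swapped for a local copy whose fault handler forwards to
 * TTM's.  Offsets below DRM_FILE_PAGE_OFFSET still belong to the
 * legacy drm_mmap() path.
 *
 * For illustration only (not part of this file, and assuming the
 * radeon_drm.h/libdrm interfaces of this era): userspace reaches this
 * path by asking the kernel for a buffer's fake mmap offset and passing
 * it to mmap(2) on the DRM fd, roughly:
 *
 *	struct drm_radeon_gem_mmap args = { .handle = handle, .size = size };
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_MMAP, &args, sizeof(args));
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, args.addr_ptr);
 */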
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	r = ttm_vm_ops->fault(vma, vmf);
	return r;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = (struct drm_file *)filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_backend {
	struct ttm_backend backend;
	struct radeon_device *rdev;
	unsigned long num_pages;
	struct page **pages;
	struct page *dummy_read_page;
	bool populated;
	bool bound;
	unsigned offset;
};

static int radeon_ttm_backend_populate(struct ttm_backend *backend,
				       unsigned long num_pages,
				       struct page **pages,
				       struct page *dummy_read_page)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = pages;
	gtt->num_pages = num_pages;
	gtt->dummy_read_page = dummy_read_page;
	gtt->populated = true;
	return 0;
}

static void radeon_ttm_backend_clear(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
}

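/*
 * Bind the pages gathered by populate() into the GART at the offset
 * TTM chose; a bind of zero pages indicates a TTM state bug, hence the
 * WARN.
 */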
static int radeon_ttm_backend_bind(struct ttm_backend *backend,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_backend *gtt;
	int r;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
	if (!gtt->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     gtt->num_pages, bo_mem, backend);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     gtt->num_pages, gtt->pages);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  gtt->num_pages, gtt->offset);
		return r;
	}
	gtt->bound = true;
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
	gtt->bound = false;
	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	if (gtt->bound) {
		radeon_ttm_backend_unbind(backend);
	}
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.populate = &radeon_ttm_backend_populate,
	.clear = &radeon_ttm_backend_clear,
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
{
	struct radeon_ttm_backend *gtt;

	gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->backend.bdev = &rdev->mman.bdev;
	gtt->backend.flags = 0;
	gtt->backend.func = &radeon_backend_func;
	gtt->rdev = rdev;
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
	return &gtt->backend;
}

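/*
 * debugfs: expose one file per pool (radeon_vram_mm, radeon_gtt_mm),
 * each dumping the corresponding drm_mm allocator under the LRU lock.
 */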
#define RADEON_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ret;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}
#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
	unsigned i;

	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
		else
			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
		radeon_mem_types_list[i].driver_features = 0;
		if (i == 0)
			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
		else
			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
	}
	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES);

#endif
	return 0;
}