/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"

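/*
 * Driver-side wrapper around a TTM buffer object. Tracks the kernel
 * mapping, pin count, GPU address and tiling state for one BO.
 */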
struct radeon_object {
	struct ttm_buffer_object	tobj;
	struct list_head		list;
	struct radeon_device		*rdev;
	struct drm_gem_object		*gobj;
	struct ttm_bo_kmap_obj		kmap;
	unsigned			pin_count;
	uint64_t			gpu_addr;
	void				*kptr;
	bool				is_iomem;
	uint32_t			tiling_flags;
	uint32_t			pitch;
	int				surface_reg;
};

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
{
	return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
}

static void radeon_object_unreserve(struct radeon_object *robj)
{
	ttm_bo_unreserve(&robj->tobj);
}

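/*
 * TTM destroy callback, invoked when the last reference to the buffer
 * object is dropped. Releases the surface register (if any) and frees
 * the wrapper.
 */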
static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
{
	struct radeon_object *robj;

	robj = container_of(tobj, struct radeon_object, tobj);
	list_del_init(&robj->list);
	radeon_object_clear_surface_reg(robj);
	kfree(robj);
}

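/*
 * Compute the GPU-visible address of a bound BO: the mm_node offset in
 * pages plus the base of the aperture (VRAM or GTT) it is placed in.
 */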
static inline void radeon_object_gpu_addr(struct radeon_object *robj)
{
	/* Default gpu address */
	robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
	if (robj->tobj.mem.mm_node == NULL) {
		return;
	}
	robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
	switch (robj->tobj.mem.mem_type) {
	case TTM_PL_VRAM:
		robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
		robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
		return;
	}
}

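/* Translate a RADEON_GEM_DOMAIN_* mask into TTM placement flags. */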
static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
{
	uint32_t flags = 0;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
	}
	if (domain & RADEON_GEM_DOMAIN_GTT) {
		flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	}
	if (domain & RADEON_GEM_DOMAIN_CPU) {
		flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
	}
	if (!flags) {
		flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
	}
	return flags;
}

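/*
 * Allocate and initialize a radeon_object of @size bytes placed in
 * @domain. On success *robj_ptr holds the new object; GEM-backed BOs
 * (@gobj != NULL) are also tracked on rdev->gem.objects.
 */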
int radeon_object_create(struct radeon_device *rdev,
			 struct drm_gem_object *gobj,
			 unsigned long size,
			 bool kernel,
			 uint32_t domain,
			 bool interruptible,
			 struct radeon_object **robj_ptr)
{
	struct radeon_object *robj;
	enum ttm_bo_type type;
	uint32_t flags;
	int r;

	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else {
		type = ttm_bo_type_device;
	}
	*robj_ptr = NULL;
	robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
	if (robj == NULL) {
		return -ENOMEM;
	}
	robj->rdev = rdev;
	robj->gobj = gobj;
	robj->surface_reg = -1;
	INIT_LIST_HEAD(&robj->list);

	flags = radeon_object_flags_from_domain(domain);
	r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
				   0, 0, false, NULL, size,
				   &radeon_ttm_object_object_destroy);
	if (unlikely(r != 0)) {
		/* ttm calls radeon_ttm_object_object_destroy if an error happens */
		DRM_ERROR("Failed to allocate TTM object (%lu, 0x%08X, %u)\n",
			  size, flags, 0);
		return r;
	}
	*robj_ptr = robj;
	if (gobj) {
		list_add_tail(&robj->list, &rdev->gem.objects);
	}
	return 0;
}

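/*
 * Map the whole BO into kernel address space. The mapping is cached in
 * robj->kptr, so repeated calls return the same pointer.
 */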
int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
	int r;

	spin_lock(&robj->tobj.lock);
	if (robj->kptr) {
		if (ptr) {
			*ptr = robj->kptr;
		}
		spin_unlock(&robj->tobj.lock);
		return 0;
	}
	spin_unlock(&robj->tobj.lock);
	r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
	if (r) {
		return r;
	}
	spin_lock(&robj->tobj.lock);
	robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
	spin_unlock(&robj->tobj.lock);
	if (ptr) {
		*ptr = robj->kptr;
	}
	radeon_object_check_tiling(robj, 0, 0);
	return 0;
}
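
/*
 * Typical caller pattern (illustrative sketch, not code from this file):
 * create a kernel BO in VRAM, pin it and map it for CPU access.
 *
 *	struct radeon_object *robj;
 *	uint64_t gpu_addr;
 *	void *ptr;
 *	int r;
 *
 *	r = radeon_object_create(rdev, NULL, size, true,
 *				 RADEON_GEM_DOMAIN_VRAM, false, &robj);
 *	if (r == 0)
 *		r = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	if (r == 0)
 *		r = radeon_object_kmap(robj, &ptr);
 */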

void radeon_object_kunmap(struct radeon_object *robj)
{
	spin_lock(&robj->tobj.lock);
	if (robj->kptr == NULL) {
		spin_unlock(&robj->tobj.lock);
		return;
	}
	robj->kptr = NULL;
	spin_unlock(&robj->tobj.lock);
	radeon_object_check_tiling(robj, 0, 0);
	ttm_bo_kunmap(&robj->kmap);
}

void radeon_object_unref(struct radeon_object **robj)
{
	struct ttm_buffer_object *tobj;

	if ((*robj) == NULL) {
		return;
	}
	tobj = &((*robj)->tobj);
	ttm_bo_unref(&tobj);
	if (tobj == NULL) {
		*robj = NULL;
	}
}

int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
{
	*offset = robj->tobj.addr_space_offset;
	return 0;
}

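/*
 * Pin a BO into @domain so it cannot be evicted, validating it with
 * TTM_PL_FLAG_NO_EVICT set. Pins are reference counted; only the first
 * pin moves the buffer. Returns the GPU address through @gpu_addr.
 */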
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
		      uint64_t *gpu_addr)
{
	uint32_t flags;
	uint32_t tmp;
	int r;

	flags = radeon_object_flags_from_domain(domain);
	spin_lock(&robj->tobj.lock);
	if (robj->pin_count) {
		robj->pin_count++;
		if (gpu_addr != NULL) {
			*gpu_addr = robj->gpu_addr;
		}
		spin_unlock(&robj->tobj.lock);
		return 0;
	}
	spin_unlock(&robj->tobj.lock);
	r = radeon_object_reserve(robj, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for pinning.\n");
		return r;
	}
	tmp = robj->tobj.mem.placement;
	ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
	robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
	r = ttm_buffer_object_validate(&robj->tobj,
				       robj->tobj.proposed_placement,
				       false, false);
	radeon_object_gpu_addr(robj);
	if (gpu_addr != NULL) {
		*gpu_addr = robj->gpu_addr;
	}
	robj->pin_count = 1;
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to pin object.\n");
	}
	radeon_object_unreserve(robj);
	return r;
}

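/*
 * Drop one pin reference; when the count reaches zero the BO is
 * revalidated without TTM_PL_FLAG_NO_EVICT so it becomes evictable again.
 */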
void radeon_object_unpin(struct radeon_object *robj)
{
	uint32_t flags;
	int r;

	spin_lock(&robj->tobj.lock);
	if (!robj->pin_count) {
		spin_unlock(&robj->tobj.lock);
		printk(KERN_WARNING "Unpin not necessary for %p!\n", robj);
		return;
	}
	robj->pin_count--;
	if (robj->pin_count) {
		spin_unlock(&robj->tobj.lock);
		return;
	}
	spin_unlock(&robj->tobj.lock);
	r = radeon_object_reserve(robj, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for unpinning.\n");
		return;
	}
	flags = robj->tobj.mem.placement;
	robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_buffer_object_validate(&robj->tobj,
				       robj->tobj.proposed_placement,
				       false, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to unpin buffer.\n");
	}
	radeon_object_unreserve(robj);
}

int radeon_object_wait(struct radeon_object *robj)
{
	int r = 0;

	/* FIXME: should use block reservation instead */
	r = radeon_object_reserve(robj, true);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
		return r;
	}
	spin_lock(&robj->tobj.lock);
	if (robj->tobj.sync_obj) {
		r = ttm_bo_wait(&robj->tobj, true, true, false);
	}
	spin_unlock(&robj->tobj.lock);
	radeon_object_unreserve(robj);
	radeon_hdp_flush(robj->rdev);
	return r;
}

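/*
 * Report the BO's current placement through @cur_placement and poll its
 * fence without blocking (-EBUSY while the GPU is still using it).
 */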
int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
{
	int r = 0;

	r = radeon_object_reserve(robj, true);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
		return r;
	}
	spin_lock(&robj->tobj.lock);
	*cur_placement = robj->tobj.mem.mem_type;
	if (robj->tobj.sync_obj) {
		r = ttm_bo_wait(&robj->tobj, true, true, true);
	}
	spin_unlock(&robj->tobj.lock);
	radeon_object_unreserve(robj);
	return r;
}

int radeon_object_evict_vram(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_IGP) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

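/*
 * Teardown safety net: forcibly free any GEM objects userspace leaked,
 * intended for driver shutdown.
 */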
void radeon_object_force_delete(struct radeon_device *rdev)
{
	struct radeon_object *robj, *n;
	struct drm_gem_object *gobj;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	DRM_ERROR("Userspace still has active objects!\n");
	list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		gobj = robj->gobj;
		DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
			  gobj, robj, (unsigned long)gobj->size,
			  *((unsigned long *)&gobj->refcount));
		list_del_init(&robj->list);
		radeon_object_unref(&robj);
		gobj->driver_private = NULL;
		drm_gem_object_unreference(gobj);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_object_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
				      MTRR_TYPE_WRCOMB, 1);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_object_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}

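/*
 * Add a BO to a command-stream list; buffers that will be written are
 * placed at the head so they are processed before read-only ones.
 */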
void radeon_object_list_add_object(struct radeon_object_list *lobj,
				   struct list_head *head)
{
	if (lobj->wdomain) {
		list_add(&lobj->list, head);
	} else {
		list_add_tail(&lobj->list, head);
	}
}

int radeon_object_list_reserve(struct list_head *head)
{
	struct radeon_object_list *lobj;
	int r;

	list_for_each_entry(lobj, head, list) {
		if (!lobj->robj->pin_count) {
			r = radeon_object_reserve(lobj->robj, true);
			if (unlikely(r != 0)) {
				DRM_ERROR("radeon: failed to reserve object.\n");
				return r;
			}
		}
	}
	return 0;
}

void radeon_object_list_unreserve(struct list_head *head)
{
	struct radeon_object_list *lobj;

	list_for_each_entry(lobj, head, list) {
		if (!lobj->robj->pin_count) {
			radeon_object_unreserve(lobj->robj);
		}
	}
}

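/*
 * Reserve every BO on the list, validate unpinned ones into their
 * requested domain, record the resulting GPU offsets and attach @fence
 * (replacing any older fence) to each buffer.
 */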
int radeon_object_list_validate(struct list_head *head, void *fence)
{
	struct radeon_object_list *lobj;
	struct radeon_object *robj;
	struct radeon_fence *old_fence = NULL;
	int r;

	r = radeon_object_list_reserve(head);
	if (unlikely(r != 0)) {
		radeon_object_list_unreserve(head);
		return r;
	}
	list_for_each_entry(lobj, head, list) {
		robj = lobj->robj;
		if (!robj->pin_count) {
			if (lobj->wdomain) {
				robj->tobj.proposed_placement =
					radeon_object_flags_from_domain(lobj->wdomain);
			} else {
				robj->tobj.proposed_placement =
					radeon_object_flags_from_domain(lobj->rdomain);
			}
			r = ttm_buffer_object_validate(&robj->tobj,
						       robj->tobj.proposed_placement,
						       true, false);
			if (unlikely(r)) {
				DRM_ERROR("radeon: failed to validate.\n");
				return r;
			}
			radeon_object_gpu_addr(robj);
		}
		lobj->gpu_offset = robj->gpu_addr;
		lobj->tiling_flags = robj->tiling_flags;
		if (fence) {
			old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
			robj->tobj.sync_obj = radeon_fence_ref(fence);
			robj->tobj.sync_obj_arg = NULL;
		}
		if (old_fence) {
			radeon_fence_unref(&old_fence);
		}
	}
	return 0;
}

void radeon_object_list_unvalidate(struct list_head *head)
{
	struct radeon_object_list *lobj;
	struct radeon_fence *old_fence = NULL;

	list_for_each_entry(lobj, head, list) {
		old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
		lobj->robj->tobj.sync_obj = NULL;
		if (old_fence) {
			radeon_fence_unref(&old_fence);
		}
	}
	radeon_object_list_unreserve(head);
}

void radeon_object_list_clean(struct list_head *head)
{
	radeon_object_list_unreserve(head);
}

int radeon_object_fbdev_mmap(struct radeon_object *robj,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &robj->tobj);
}

unsigned long radeon_object_size(struct radeon_object *robj)
{
	return robj->tobj.num_pages << PAGE_SHIFT;
}

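/*
 * Find a surface register for a tiled BO: reuse its current one, take a
 * free one, or steal one from an unpinned BO, then program it with the
 * BO's tiling parameters.
 */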
int radeon_object_get_surface_reg(struct radeon_object *robj)
{
	struct radeon_device *rdev = robj->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_object *old_object;
	int steal;
	int i;

	if (!robj->tiling_flags)
		return 0;

	if (robj->surface_reg >= 0) {
		reg = &rdev->surface_regs[robj->surface_reg];
		i = robj->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
		reg = &rdev->surface_regs[i];
		if (!reg->robj)
			break;

		old_object = reg->robj;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->robj;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tobj);
		old_object->surface_reg = -1;
		i = steal;
	}

	robj->surface_reg = i;
	reg->robj = robj;

out:
	radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
			       robj->tobj.mem.mm_node->start << PAGE_SHIFT,
			       robj->tobj.num_pages << PAGE_SHIFT);
	return 0;
}

void radeon_object_clear_surface_reg(struct radeon_object *robj)
{
	struct radeon_device *rdev = robj->rdev;
	struct radeon_surface_reg *reg;

	if (robj->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[robj->surface_reg];
	radeon_clear_surface_reg(rdev, robj->surface_reg);

	reg->robj = NULL;
	robj->surface_reg = -1;
}

void radeon_object_set_tiling_flags(struct radeon_object *robj,
				    uint32_t tiling_flags, uint32_t pitch)
{
	robj->tiling_flags = tiling_flags;
	robj->pitch = pitch;
}

void radeon_object_get_tiling_flags(struct radeon_object *robj,
				    uint32_t *tiling_flags,
				    uint32_t *pitch)
{
	if (tiling_flags)
		*tiling_flags = robj->tiling_flags;
	if (pitch)
		*pitch = robj->pitch;
}

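/*
 * Keep a surface-tiled BO's surface register consistent with its
 * placement: drop it on request or when the BO leaves VRAM, and
 * (re)acquire it while the BO lives in VRAM.
 */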
int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
			       bool force_drop)
{
	if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_object_clear_surface_reg(robj);
		return 0;
	}

	if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (robj->surface_reg >= 0)
			radeon_object_clear_surface_reg(robj);
		return 0;
	}

	if ((robj->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_object_get_surface_reg(robj);
}

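/*
 * TTM hooks: a move force-drops the BO's surface register, while a CPU
 * fault revalidates it against the buffer's current placement.
 */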
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);

	radeon_object_check_tiling(robj, 0, 1);
}

void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);

	radeon_object_check_tiling(robj, 0, 0);
}