1 /**************************************************************************
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
31 #include "ttm/ttm_module.h"
32 #include "ttm/ttm_bo_driver.h"
33 #include "ttm/ttm_placement.h"
34 #include <linux/jiffies.h>
35 #include <linux/slab.h>
36 #include <linux/sched.h>
38 #include <linux/file.h>
39 #include <linux/module.h>
41 #define TTM_ASSERT_LOCKED(param)
42 #define TTM_DEBUG(fmt, arg...)
43 #define TTM_BO_HASH_ORDER 13
45 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
46 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
47 static void ttm_bo_global_kobj_release(struct kobject *kobj);
49 static struct attribute ttm_bo_count = {
54 static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
58 for (i = 0; i <= TTM_PL_PRIV5; i++)
59 if (flags & (1 << i)) {
66 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
68 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
70 printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
71 printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
72 printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
73 printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
74 printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
75 printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
76 man->available_caching);
77 printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
78 man->default_caching);
79 if (mem_type != TTM_PL_SYSTEM)
80 (*man->func->debug)(man, TTM_PFX);
83 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
84 struct ttm_placement *placement)
88 printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
89 bo, bo->mem.num_pages, bo->mem.size >> 10,
91 for (i = 0; i < placement->num_placement; i++) {
92 ret = ttm_mem_type_from_flags(placement->placement[i],
96 printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
97 i, placement->placement[i], mem_type);
98 ttm_mem_type_debug(bo->bdev, mem_type);
102 static ssize_t ttm_bo_global_show(struct kobject *kobj,
103 struct attribute *attr,
106 struct ttm_bo_global *glob =
107 container_of(kobj, struct ttm_bo_global, kobj);
109 return snprintf(buffer, PAGE_SIZE, "%lu\n",
110 (unsigned long) atomic_read(&glob->bo_count));
113 static struct attribute *ttm_bo_global_attrs[] = {
118 static const struct sysfs_ops ttm_bo_global_ops = {
119 .show = &ttm_bo_global_show
122 static struct kobj_type ttm_bo_glob_kobj_type = {
123 .release = &ttm_bo_global_kobj_release,
124 .sysfs_ops = &ttm_bo_global_ops,
125 .default_attrs = ttm_bo_global_attrs
129 static inline uint32_t ttm_bo_type_flags(unsigned type)
134 static void ttm_bo_release_list(struct kref *list_kref)
136 struct ttm_buffer_object *bo =
137 container_of(list_kref, struct ttm_buffer_object, list_kref);
138 struct ttm_bo_device *bdev = bo->bdev;
140 BUG_ON(atomic_read(&bo->list_kref.refcount));
141 BUG_ON(atomic_read(&bo->kref.refcount));
142 BUG_ON(atomic_read(&bo->cpu_writers));
143 BUG_ON(bo->sync_obj != NULL);
144 BUG_ON(bo->mem.mm_node != NULL);
145 BUG_ON(!list_empty(&bo->lru));
146 BUG_ON(!list_empty(&bo->ddestroy));
149 ttm_tt_destroy(bo->ttm);
150 atomic_dec(&bo->glob->bo_count);
154 ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
159 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
162 return wait_event_interruptible(bo->event_queue,
163 atomic_read(&bo->reserved) == 0);
165 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
169 EXPORT_SYMBOL(ttm_bo_wait_unreserved);
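/*
 * LRU list handling (descriptive note): a buffer that is not pinned with
 * TTM_PL_FLAG_NO_EVICT sits on the per-memory-type LRU list, and additionally
 * on the global swap LRU when it has a ttm backing it. Each list membership
 * takes a reference on bo->list_kref; ttm_bo_del_from_lru() drops the list
 * entries and reports how many references the caller must put back.
 */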
171 static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
173 struct ttm_bo_device *bdev = bo->bdev;
174 struct ttm_mem_type_manager *man;
176 BUG_ON(!atomic_read(&bo->reserved));
178 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
180 BUG_ON(!list_empty(&bo->lru));
182 man = &bdev->man[bo->mem.mem_type];
183 list_add_tail(&bo->lru, &man->lru);
184 kref_get(&bo->list_kref);
186 if (bo->ttm != NULL) {
187 list_add_tail(&bo->swap, &bo->glob->swap_lru);
188 kref_get(&bo->list_kref);
194 * Call with the lru_lock held.
197 static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
201 if (!list_empty(&bo->swap)) {
202 list_del_init(&bo->swap);
205 if (!list_empty(&bo->lru)) {
206 list_del_init(&bo->lru);
211 * TODO: Add a driver hook to delete from
212 * driver-specific LRUs here.
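/*
 * Reservation (descriptive note): bo->reserved is a simple atomic flag taken
 * with cmpxchg. When a validation sequence is supplied, a contender whose
 * sequence is not older than the current holder's backs off instead of
 * sleeping, which is what avoids deadlocks when whole lists of buffers are
 * reserved for a single submission.
 */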
218 int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
220 bool no_wait, bool use_sequence, uint32_t sequence)
222 struct ttm_bo_global *glob = bo->glob;
225 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
226 if (use_sequence && bo->seq_valid &&
227 (sequence - bo->val_seq < (1 << 31))) {
234 spin_unlock(&glob->lru_lock);
235 ret = ttm_bo_wait_unreserved(bo, interruptible);
236 spin_lock(&glob->lru_lock);
243 bo->val_seq = sequence;
244 bo->seq_valid = true;
246 bo->seq_valid = false;
251 EXPORT_SYMBOL(ttm_bo_reserve);
253 static void ttm_bo_ref_bug(struct kref *list_kref)
258 int ttm_bo_reserve(struct ttm_buffer_object *bo,
260 bool no_wait, bool use_sequence, uint32_t sequence)
262 struct ttm_bo_global *glob = bo->glob;
266 spin_lock(&glob->lru_lock);
267 ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
269 if (likely(ret == 0))
270 put_count = ttm_bo_del_from_lru(bo);
271 spin_unlock(&glob->lru_lock);
274 kref_put(&bo->list_kref, ttm_bo_ref_bug);
279 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
281 struct ttm_bo_global *glob = bo->glob;
283 spin_lock(&glob->lru_lock);
284 ttm_bo_add_to_lru(bo);
285 atomic_set(&bo->reserved, 0);
286 wake_up_all(&bo->event_queue);
287 spin_unlock(&glob->lru_lock);
289 EXPORT_SYMBOL(ttm_bo_unreserve);
292 * Called with bo->mutex held.
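/*
 * ttm_bo_add_ttm() allocates the struct ttm_tt page backing for a buffer
 * that needs one: kernel and device buffers get TTM-allocated pages (device
 * buffers can request zeroed pages), while user buffers are additionally
 * wired to the calling process's pages via ttm_tt_set_user().
 */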
294 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
296 struct ttm_bo_device *bdev = bo->bdev;
297 struct ttm_bo_global *glob = bo->glob;
299 uint32_t page_flags = 0;
301 TTM_ASSERT_LOCKED(&bo->mutex);
304 if (bdev->need_dma32)
305 page_flags |= TTM_PAGE_FLAG_DMA32;
308 case ttm_bo_type_device:
310 page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
311 case ttm_bo_type_kernel:
312 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
313 page_flags, glob->dummy_read_page);
314 if (unlikely(bo->ttm == NULL))
317 case ttm_bo_type_user:
318 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
319 page_flags | TTM_PAGE_FLAG_USER,
320 glob->dummy_read_page);
321 if (unlikely(bo->ttm == NULL)) {
326 ret = ttm_tt_set_user(bo->ttm, current,
327 bo->buffer_start, bo->num_pages);
328 if (unlikely(ret != 0))
329 ttm_tt_destroy(bo->ttm);
332 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
340 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
341 struct ttm_mem_reg *mem,
342 bool evict, bool interruptible,
343 bool no_wait_reserve, bool no_wait_gpu)
345 struct ttm_bo_device *bdev = bo->bdev;
346 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
347 bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
348 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
349 struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
352 if (old_is_pci || new_is_pci ||
353 ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
354 ttm_bo_unmap_virtual(bo);
357 * Create and bind a ttm if required.
360 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
361 ret = ttm_bo_add_ttm(bo, false);
365 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
369 if (mem->mem_type != TTM_PL_SYSTEM) {
370 ret = ttm_tt_bind(bo->ttm, mem);
375 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
383 if (bdev->driver->move_notify)
384 bdev->driver->move_notify(bo, mem);
386 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
387 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
388 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
389 else if (bdev->driver->move)
390 ret = bdev->driver->move(bo, evict, interruptible,
391 no_wait_reserve, no_wait_gpu, mem);
393 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
400 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
402 printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
406 if (bo->mem.mm_node) {
407 spin_lock(&bo->lock);
408 bo->offset = (bo->mem.start << PAGE_SHIFT) +
409 bdev->man[bo->mem.mem_type].gpu_offset;
410 bo->cur_placement = bo->mem.placement;
411 spin_unlock(&bo->lock);
418 new_man = &bdev->man[bo->mem.mem_type];
419 if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
420 ttm_tt_unbind(bo->ttm);
421 ttm_tt_destroy(bo->ttm);
430 * Will release GPU memory type usage on destruction.
431 * This is the place to put in driver specific hooks to release
432 * driver private resources.
433 * Will release the bo::reserved lock.
436 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
439 ttm_tt_unbind(bo->ttm);
440 ttm_tt_destroy(bo->ttm);
444 ttm_bo_mem_put(bo, &bo->mem);
446 atomic_set(&bo->reserved, 0);
447 wake_up_all(&bo->event_queue);
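/*
 * Destruction path (descriptive note): if the buffer is already idle it is
 * torn down immediately below; otherwise it is queued on the device's
 * delayed-destroy list and the delayed workqueue retries once the fence
 * (sync_obj) has signaled.
 */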
450 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
452 struct ttm_bo_device *bdev = bo->bdev;
453 struct ttm_bo_global *glob = bo->glob;
454 struct ttm_bo_driver *driver;
460 spin_lock(&bo->lock);
461 (void) ttm_bo_wait(bo, false, false, true);
464 spin_lock(&glob->lru_lock);
467 * Lock inversion between bo::reserve and bo::lock here,
468 * but that's OK, since we're only trylocking.
471 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
473 if (unlikely(ret == -EBUSY))
476 spin_unlock(&bo->lock);
477 put_count = ttm_bo_del_from_lru(bo);
479 spin_unlock(&glob->lru_lock);
480 ttm_bo_cleanup_memtype_use(bo);
483 kref_put(&bo->list_kref, ttm_bo_ref_bug);
487 spin_lock(&glob->lru_lock);
490 sync_obj = bo->sync_obj;
491 sync_obj_arg = bo->sync_obj_arg;
492 driver = bdev->driver;
494 kref_get(&bo->list_kref);
495 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
496 spin_unlock(&glob->lru_lock);
497 spin_unlock(&bo->lock);
500 driver->sync_obj_flush(sync_obj, sync_obj_arg);
501 schedule_delayed_work(&bdev->wq,
502 ((HZ / 100) < 1) ? 1 : HZ / 100);
506 * function ttm_bo_cleanup_refs
507 * If bo idle, remove from delayed- and lru lists, and unref.
508 * If not idle, do nothing.
510 * @interruptible Any sleeps should occur interruptibly.
511 * @no_wait_reserve Never wait for reserve. Return -EBUSY instead.
512 * @no_wait_gpu Never wait for gpu. Return -EBUSY instead.
515 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
517 bool no_wait_reserve,
520 struct ttm_bo_global *glob = bo->glob;
525 spin_lock(&bo->lock);
526 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
527 spin_unlock(&bo->lock);
529 if (unlikely(ret != 0))
532 spin_lock(&glob->lru_lock);
533 ret = ttm_bo_reserve_locked(bo, interruptible,
534 no_wait_reserve, false, 0);
536 if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
537 spin_unlock(&glob->lru_lock);
542 * We can re-check for sync object without taking
543 * the bo::lock since setting the sync object requires
544 * also bo::reserved. A busy object at this point may
545 * be caused by another thread recently starting an accelerated eviction.
549 if (unlikely(bo->sync_obj)) {
550 atomic_set(&bo->reserved, 0);
551 wake_up_all(&bo->event_queue);
552 spin_unlock(&glob->lru_lock);
556 put_count = ttm_bo_del_from_lru(bo);
557 list_del_init(&bo->ddestroy);
560 spin_unlock(&glob->lru_lock);
561 ttm_bo_cleanup_memtype_use(bo);
564 kref_put(&bo->list_kref, ttm_bo_ref_bug);
570 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
571 * encountered buffers.
574 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
576 struct ttm_bo_global *glob = bdev->glob;
577 struct ttm_buffer_object *entry = NULL;
580 spin_lock(&glob->lru_lock);
581 if (list_empty(&bdev->ddestroy))
584 entry = list_first_entry(&bdev->ddestroy,
585 struct ttm_buffer_object, ddestroy);
586 kref_get(&entry->list_kref);
589 struct ttm_buffer_object *nentry = NULL;
591 if (entry->ddestroy.next != &bdev->ddestroy) {
592 nentry = list_first_entry(&entry->ddestroy,
593 struct ttm_buffer_object, ddestroy);
594 kref_get(&nentry->list_kref);
597 spin_unlock(&glob->lru_lock);
598 ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
600 kref_put(&entry->list_kref, ttm_bo_release_list);
606 spin_lock(&glob->lru_lock);
607 if (list_empty(&entry->ddestroy))
612 spin_unlock(&glob->lru_lock);
615 kref_put(&entry->list_kref, ttm_bo_release_list);
619 static void ttm_bo_delayed_workqueue(struct work_struct *work)
621 struct ttm_bo_device *bdev =
622 container_of(work, struct ttm_bo_device, wq.work);
624 if (ttm_bo_delayed_delete(bdev, false)) {
625 schedule_delayed_work(&bdev->wq,
626 ((HZ / 100) < 1) ? 1 : HZ / 100);
630 static void ttm_bo_release(struct kref *kref)
632 struct ttm_buffer_object *bo =
633 container_of(kref, struct ttm_buffer_object, kref);
634 struct ttm_bo_device *bdev = bo->bdev;
636 if (likely(bo->vm_node != NULL)) {
637 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
638 drm_mm_put_block(bo->vm_node);
641 write_unlock(&bdev->vm_lock);
642 ttm_bo_cleanup_refs_or_queue(bo);
643 kref_put(&bo->list_kref, ttm_bo_release_list);
644 write_lock(&bdev->vm_lock);
647 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
649 struct ttm_buffer_object *bo = *p_bo;
650 struct ttm_bo_device *bdev = bo->bdev;
653 write_lock(&bdev->vm_lock);
654 kref_put(&bo->kref, ttm_bo_release);
655 write_unlock(&bdev->vm_lock);
657 EXPORT_SYMBOL(ttm_bo_unref);
659 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
661 return cancel_delayed_work_sync(&bdev->wq);
663 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
665 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
668 schedule_delayed_work(&bdev->wq,
669 ((HZ / 100) < 1) ? 1 : HZ / 100);
671 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
673 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
674 bool no_wait_reserve, bool no_wait_gpu)
676 struct ttm_bo_device *bdev = bo->bdev;
677 struct ttm_mem_reg evict_mem;
678 struct ttm_placement placement;
681 spin_lock(&bo->lock);
682 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
683 spin_unlock(&bo->lock);
685 if (unlikely(ret != 0)) {
686 if (ret != -ERESTARTSYS) {
687 printk(KERN_ERR TTM_PFX
688 "Failed to expire sync object before "
689 "buffer eviction.\n");
694 BUG_ON(!atomic_read(&bo->reserved));
697 evict_mem.mm_node = NULL;
698 evict_mem.bus.io_reserved = false;
702 placement.num_placement = 0;
703 placement.num_busy_placement = 0;
704 bdev->driver->evict_flags(bo, &placement);
705 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
706 no_wait_reserve, no_wait_gpu);
708 if (ret != -ERESTARTSYS) {
709 printk(KERN_ERR TTM_PFX
710 "Failed to find memory space for "
711 "buffer 0x%p eviction.\n", bo);
712 ttm_bo_mem_space_debug(bo, &placement);
717 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
718 no_wait_reserve, no_wait_gpu);
720 if (ret != -ERESTARTSYS)
721 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
722 ttm_bo_mem_put(bo, &evict_mem);
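/*
 * ttm_mem_evict_first() picks the least recently used buffer of a memory
 * type, reserves it and evicts it to make room for a new allocation.
 * Buffers already on the delayed-destroy list are cleaned up instead of
 * being moved.
 */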
730 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
732 bool interruptible, bool no_wait_reserve,
735 struct ttm_bo_global *glob = bdev->glob;
736 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
737 struct ttm_buffer_object *bo;
738 int ret, put_count = 0;
741 spin_lock(&glob->lru_lock);
742 if (list_empty(&man->lru)) {
743 spin_unlock(&glob->lru_lock);
747 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
748 kref_get(&bo->list_kref);
750 if (!list_empty(&bo->ddestroy)) {
751 spin_unlock(&glob->lru_lock);
752 ret = ttm_bo_cleanup_refs(bo, interruptible,
753 no_wait_reserve, no_wait_gpu);
754 kref_put(&bo->list_kref, ttm_bo_release_list);
756 if (likely(ret == 0 || ret == -ERESTARTSYS))
762 ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
764 if (unlikely(ret == -EBUSY)) {
765 spin_unlock(&glob->lru_lock);
766 if (likely(!no_wait_gpu))
767 ret = ttm_bo_wait_unreserved(bo, interruptible);
769 kref_put(&bo->list_kref, ttm_bo_release_list);
772 * We *need* to retry after releasing the lru lock.
775 if (unlikely(ret != 0))
780 put_count = ttm_bo_del_from_lru(bo);
781 spin_unlock(&glob->lru_lock);
786 kref_put(&bo->list_kref, ttm_bo_ref_bug);
788 ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
789 ttm_bo_unreserve(bo);
791 kref_put(&bo->list_kref, ttm_bo_release_list);
795 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
797 struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
800 (*man->func->put_node)(man, mem);
802 EXPORT_SYMBOL(ttm_bo_mem_put);
805 * Repeatedly evict memory from the LRU for @mem_type until we create enough
806 * space, or we've evicted everything and there isn't enough space.
808 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
810 struct ttm_placement *placement,
811 struct ttm_mem_reg *mem,
813 bool no_wait_reserve,
816 struct ttm_bo_device *bdev = bo->bdev;
817 struct ttm_bo_global *glob = bdev->glob;
818 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
822 ret = (*man->func->get_node)(man, bo, placement, mem);
823 if (unlikely(ret != 0))
827 spin_lock(&glob->lru_lock);
828 if (list_empty(&man->lru)) {
829 spin_unlock(&glob->lru_lock);
832 spin_unlock(&glob->lru_lock);
833 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
834 no_wait_reserve, no_wait_gpu);
835 if (unlikely(ret != 0))
838 if (mem->mm_node == NULL)
840 mem->mem_type = mem_type;
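/*
 * ttm_bo_select_caching() picks caching flags for a new placement: the
 * current caching mode is preferred if the manager allows it, then the
 * manager's default caching, then cached, write-combined and finally
 * uncached.
 */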
844 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
845 uint32_t cur_placement,
846 uint32_t proposed_placement)
848 uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
849 uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
852 * Keep current caching if possible.
855 if ((cur_placement & caching) != 0)
856 result |= (cur_placement & caching);
857 else if ((man->default_caching & caching) != 0)
858 result |= man->default_caching;
859 else if ((TTM_PL_FLAG_CACHED & caching) != 0)
860 result |= TTM_PL_FLAG_CACHED;
861 else if ((TTM_PL_FLAG_WC & caching) != 0)
862 result |= TTM_PL_FLAG_WC;
863 else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
864 result |= TTM_PL_FLAG_UNCACHED;
869 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
872 uint32_t proposed_placement,
873 uint32_t *masked_placement)
875 uint32_t cur_flags = ttm_bo_type_flags(mem_type);
877 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
880 if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
883 if ((proposed_placement & man->available_caching) == 0)
886 cur_flags |= (proposed_placement & man->available_caching);
888 *masked_placement = cur_flags;
893 * Creates space for memory region @mem according to its type.
895 * This function first searches for free space in compatible memory types in
896 * the priority order defined by the driver. If free space isn't found, then
897 * ttm_bo_mem_force_space is attempted in priority order to evict and find space.
900 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
901 struct ttm_placement *placement,
902 struct ttm_mem_reg *mem,
903 bool interruptible, bool no_wait_reserve,
906 struct ttm_bo_device *bdev = bo->bdev;
907 struct ttm_mem_type_manager *man;
908 uint32_t mem_type = TTM_PL_SYSTEM;
909 uint32_t cur_flags = 0;
910 bool type_found = false;
911 bool type_ok = false;
912 bool has_erestartsys = false;
916 for (i = 0; i < placement->num_placement; ++i) {
917 ret = ttm_mem_type_from_flags(placement->placement[i],
921 man = &bdev->man[mem_type];
923 type_ok = ttm_bo_mt_compatible(man,
924 bo->type == ttm_bo_type_user,
926 placement->placement[i],
932 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
935 * Copy the access and other non-mapping-related flag bits from
936 * the memory placement flags into the current flags.
938 ttm_flag_masked(&cur_flags, placement->placement[i],
939 ~TTM_PL_MASK_MEMTYPE);
941 if (mem_type == TTM_PL_SYSTEM)
944 if (man->has_type && man->use_type) {
946 ret = (*man->func->get_node)(man, bo, placement, mem);
954 if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
955 mem->mem_type = mem_type;
956 mem->placement = cur_flags;
963 for (i = 0; i < placement->num_busy_placement; ++i) {
964 ret = ttm_mem_type_from_flags(placement->busy_placement[i],
968 man = &bdev->man[mem_type];
971 if (!ttm_bo_mt_compatible(man,
972 bo->type == ttm_bo_type_user,
974 placement->busy_placement[i],
978 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
981 * Copy the access and other non-mapping-related flag bits from
982 * the memory placement flags into the current flags.
984 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
985 ~TTM_PL_MASK_MEMTYPE);
988 if (mem_type == TTM_PL_SYSTEM) {
989 mem->mem_type = mem_type;
990 mem->placement = cur_flags;
995 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
996 interruptible, no_wait_reserve, no_wait_gpu);
997 if (ret == 0 && mem->mm_node) {
998 mem->placement = cur_flags;
1001 if (ret == -ERESTARTSYS)
1002 has_erestartsys = true;
1004 ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
1007 EXPORT_SYMBOL(ttm_bo_mem_space);
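/*
 * Illustrative example only (placement flags and values chosen for the
 * sketch, not taken from any particular driver): a driver asking for
 * write-combined VRAM with a cached GTT fallback might fill its
 * ttm_placement like this before calling ttm_bo_mem_space() or
 * ttm_bo_validate():
 *
 *	static uint32_t desired[] = { TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC };
 *	static uint32_t busy[] = { TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED };
 *	struct ttm_placement placement = {
 *		.fpfn = 0, .lpfn = 0,
 *		.placement = desired, .num_placement = 1,
 *		.busy_placement = busy, .num_busy_placement = 1,
 *	};
 */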
1009 int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
1011 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
1014 return wait_event_interruptible(bo->event_queue,
1015 atomic_read(&bo->cpu_writers) == 0);
1017 EXPORT_SYMBOL(ttm_bo_wait_cpu);
1019 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1020 struct ttm_placement *placement,
1021 bool interruptible, bool no_wait_reserve,
1025 struct ttm_mem_reg mem;
1027 BUG_ON(!atomic_read(&bo->reserved));
1030 * FIXME: It's possible to pipeline buffer moves.
1031 * Have the driver move function wait for idle when necessary,
1032 * instead of doing it here.
1034 spin_lock(&bo->lock);
1035 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
1036 spin_unlock(&bo->lock);
1039 mem.num_pages = bo->num_pages;
1040 mem.size = mem.num_pages << PAGE_SHIFT;
1041 mem.page_alignment = bo->mem.page_alignment;
1042 mem.bus.io_reserved = false;
1044 * Determine where to move the buffer.
1046 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
1049 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
1051 if (ret && mem.mm_node)
1052 ttm_bo_mem_put(bo, &mem);
1056 static int ttm_bo_mem_compat(struct ttm_placement *placement,
1057 struct ttm_mem_reg *mem)
1061 if (mem->mm_node && placement->lpfn != 0 &&
1062 (mem->start < placement->fpfn ||
1063 mem->start + mem->num_pages > placement->lpfn))
1066 for (i = 0; i < placement->num_placement; i++) {
1067 if ((placement->placement[i] & mem->placement &
1068 TTM_PL_MASK_CACHING) &&
1069 (placement->placement[i] & mem->placement &
1076 int ttm_bo_validate(struct ttm_buffer_object *bo,
1077 struct ttm_placement *placement,
1078 bool interruptible, bool no_wait_reserve,
1083 BUG_ON(!atomic_read(&bo->reserved));
1084 /* Check that range is valid */
1085 if (placement->lpfn || placement->fpfn)
1086 if (placement->fpfn > placement->lpfn ||
1087 (placement->lpfn - placement->fpfn) < bo->num_pages)
1090 * Check whether we need to move buffer.
1092 ret = ttm_bo_mem_compat(placement, &bo->mem);
1094 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
1099 * Copy the access and other non-mapping-related flag bits from
1100 * the compatible memory placement flags into the active flags.
1102 ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
1103 ~TTM_PL_MASK_MEMTYPE);
1106 * We might need to add a TTM.
1108 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1109 ret = ttm_bo_add_ttm(bo, true);
1115 EXPORT_SYMBOL(ttm_bo_validate);
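/*
 * ttm_bo_validate() is the entry point drivers use to (re)place an already
 * reserved buffer: if the current placement is compatible only the placement
 * flags are refreshed, otherwise the buffer is moved via ttm_bo_move_buffer().
 * A minimal, illustrative call (assuming the buffer is reserved and
 * "placement" is set up as in the example above):
 *
 *	ret = ttm_bo_validate(bo, &placement, true, false, false);
 */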
1117 int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1118 struct ttm_placement *placement)
1122 if (placement->fpfn || placement->lpfn) {
1123 if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
1124 printk(KERN_ERR TTM_PFX "Page number range to small "
1125 "Need %lu pages, range is [%u, %u]\n",
1126 bo->mem.num_pages, placement->fpfn,
1131 for (i = 0; i < placement->num_placement; i++) {
1132 if (!capable(CAP_SYS_ADMIN)) {
1133 if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
1134 printk(KERN_ERR TTM_PFX "Need to be root to "
1135 "modify NO_EVICT status.\n");
1140 for (i = 0; i < placement->num_busy_placement; i++) {
1141 if (!capable(CAP_SYS_ADMIN)) {
1142 if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
1143 printk(KERN_ERR TTM_PFX "Need to be root to "
1144 "modify NO_EVICT status.\n");
1152 int ttm_bo_init(struct ttm_bo_device *bdev,
1153 struct ttm_buffer_object *bo,
1155 enum ttm_bo_type type,
1156 struct ttm_placement *placement,
1157 uint32_t page_alignment,
1158 unsigned long buffer_start,
1160 struct file *persistant_swap_storage,
1162 void (*destroy) (struct ttm_buffer_object *))
1165 unsigned long num_pages;
1167 size += buffer_start & ~PAGE_MASK;
1168 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1169 if (num_pages == 0) {
1170 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
1173 bo->destroy = destroy;
1175 spin_lock_init(&bo->lock);
1176 kref_init(&bo->kref);
1177 kref_init(&bo->list_kref);
1178 atomic_set(&bo->cpu_writers, 0);
1179 atomic_set(&bo->reserved, 1);
1180 init_waitqueue_head(&bo->event_queue);
1181 INIT_LIST_HEAD(&bo->lru);
1182 INIT_LIST_HEAD(&bo->ddestroy);
1183 INIT_LIST_HEAD(&bo->swap);
1185 bo->glob = bdev->glob;
1187 bo->num_pages = num_pages;
1188 bo->mem.size = num_pages << PAGE_SHIFT;
1189 bo->mem.mem_type = TTM_PL_SYSTEM;
1190 bo->mem.num_pages = bo->num_pages;
1191 bo->mem.mm_node = NULL;
1192 bo->mem.page_alignment = page_alignment;
1193 bo->mem.bus.io_reserved = false;
1194 bo->buffer_start = buffer_start & PAGE_MASK;
1196 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1197 bo->seq_valid = false;
1198 bo->persistant_swap_storage = persistant_swap_storage;
1199 bo->acc_size = acc_size;
1200 atomic_inc(&bo->glob->bo_count);
1202 ret = ttm_bo_check_placement(bo, placement);
1203 if (unlikely(ret != 0))
1207 * For ttm_bo_type_device buffers, allocate
1208 * address space from the device.
1210 if (bo->type == ttm_bo_type_device) {
1211 ret = ttm_bo_setup_vm(bo);
1216 ret = ttm_bo_validate(bo, placement, interruptible, false, false);
1220 ttm_bo_unreserve(bo);
1224 ttm_bo_unreserve(bo);
1229 EXPORT_SYMBOL(ttm_bo_init);
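/*
 * ttm_bo_init() takes over a driver-embedded buffer object: the bo starts
 * out reserved and placed in TTM_PL_SYSTEM, is validated into the requested
 * placement, and is then unreserved. For ttm_bo_type_device objects an mmap
 * offset is also set up in the device address space.
 */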
1231 static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
1232 unsigned long num_pages)
1234 size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
1237 return glob->ttm_bo_size + 2 * page_array_size;
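/*
 * ttm_bo_create() is a convenience wrapper: it accounts the estimated object
 * size against the global memory limit, kzalloc()s a ttm_buffer_object and
 * hands it to ttm_bo_init() with a NULL destroy callback, so the object is
 * simply freed when its last reference goes away.
 */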
1240 int ttm_bo_create(struct ttm_bo_device *bdev,
1242 enum ttm_bo_type type,
1243 struct ttm_placement *placement,
1244 uint32_t page_alignment,
1245 unsigned long buffer_start,
1247 struct file *persistant_swap_storage,
1248 struct ttm_buffer_object **p_bo)
1250 struct ttm_buffer_object *bo;
1251 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1255 ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1256 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1257 if (unlikely(ret != 0))
1260 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1262 if (unlikely(bo == NULL)) {
1263 ttm_mem_global_free(mem_glob, acc_size);
1267 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1268 buffer_start, interruptible,
1269 persistant_swap_storage, acc_size, NULL);
1270 if (likely(ret == 0))
1276 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1277 unsigned mem_type, bool allow_errors)
1279 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1280 struct ttm_bo_global *glob = bdev->glob;
1284 * Can't use standard list traversal since we're unlocking.
1287 spin_lock(&glob->lru_lock);
1288 while (!list_empty(&man->lru)) {
1289 spin_unlock(&glob->lru_lock);
1290 ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
1295 printk(KERN_ERR TTM_PFX
1296 "Cleanup eviction failed\n");
1299 spin_lock(&glob->lru_lock);
1301 spin_unlock(&glob->lru_lock);
1305 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1307 struct ttm_mem_type_manager *man;
1310 if (mem_type >= TTM_NUM_MEM_TYPES) {
1311 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1314 man = &bdev->man[mem_type];
1316 if (!man->has_type) {
1317 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
1318 "memory manager type %u\n", mem_type);
1322 man->use_type = false;
1323 man->has_type = false;
1327 ttm_bo_force_list_clean(bdev, mem_type, false);
1329 ret = (*man->func->takedown)(man);
1334 EXPORT_SYMBOL(ttm_bo_clean_mm);
1336 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1338 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1340 if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1341 printk(KERN_ERR TTM_PFX
1342 "Illegal memory manager memory type %u.\n",
1347 if (!man->has_type) {
1348 printk(KERN_ERR TTM_PFX
1349 "Memory type %u has not been initialized.\n",
1354 return ttm_bo_force_list_clean(bdev, mem_type, true);
1356 EXPORT_SYMBOL(ttm_bo_evict_mm);
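/*
 * ttm_bo_init_mm() brings up a memory type: the driver's init_mem_type()
 * hook fills in the manager, and for every type except TTM_PL_SYSTEM the
 * manager's own init function is then called with the size in pages.
 */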
1358 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1359 unsigned long p_size)
1362 struct ttm_mem_type_manager *man;
1364 if (type >= TTM_NUM_MEM_TYPES) {
1365 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1369 man = &bdev->man[type];
1370 if (man->has_type) {
1371 printk(KERN_ERR TTM_PFX
1372 "Memory manager already initialized for type %d\n",
1377 ret = bdev->driver->init_mem_type(bdev, type, man);
1383 if (type != TTM_PL_SYSTEM) {
1385 printk(KERN_ERR TTM_PFX
1386 "Zero size memory manager type %d\n",
1391 ret = (*man->func->init)(man, p_size);
1395 man->has_type = true;
1396 man->use_type = true;
1399 INIT_LIST_HEAD(&man->lru);
1403 EXPORT_SYMBOL(ttm_bo_init_mm);
1405 static void ttm_bo_global_kobj_release(struct kobject *kobj)
1407 struct ttm_bo_global *glob =
1408 container_of(kobj, struct ttm_bo_global, kobj);
1410 ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1411 __free_page(glob->dummy_read_page);
1415 void ttm_bo_global_release(struct drm_global_reference *ref)
1417 struct ttm_bo_global *glob = ref->object;
1419 kobject_del(&glob->kobj);
1420 kobject_put(&glob->kobj);
1422 EXPORT_SYMBOL(ttm_bo_global_release);
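/*
 * The bo_global object is shared between all TTM devices in the system. It
 * owns the dummy read page, the global swap LRU, the shrink callback
 * registered with the memory accounting code, and a sysfs kobject that
 * exposes the buffer object count.
 */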
1424 int ttm_bo_global_init(struct drm_global_reference *ref)
1426 struct ttm_bo_global_ref *bo_ref =
1427 container_of(ref, struct ttm_bo_global_ref, ref);
1428 struct ttm_bo_global *glob = ref->object;
1431 mutex_init(&glob->device_list_mutex);
1432 spin_lock_init(&glob->lru_lock);
1433 glob->mem_glob = bo_ref->mem_glob;
1434 glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1436 if (unlikely(glob->dummy_read_page == NULL)) {
1441 INIT_LIST_HEAD(&glob->swap_lru);
1442 INIT_LIST_HEAD(&glob->device_list);
1444 ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1445 ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1446 if (unlikely(ret != 0)) {
1447 printk(KERN_ERR TTM_PFX
1448 "Could not register buffer object swapout.\n");
1452 glob->ttm_bo_extra_size =
1453 ttm_round_pot(sizeof(struct ttm_tt)) +
1454 ttm_round_pot(sizeof(struct ttm_backend));
1456 glob->ttm_bo_size = glob->ttm_bo_extra_size +
1457 ttm_round_pot(sizeof(struct ttm_buffer_object));
1459 atomic_set(&glob->bo_count, 0);
1461 ret = kobject_init_and_add(
1462 &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1463 if (unlikely(ret != 0))
1464 kobject_put(&glob->kobj);
1467 __free_page(glob->dummy_read_page);
1472 EXPORT_SYMBOL(ttm_bo_global_init);
1475 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1478 unsigned i = TTM_NUM_MEM_TYPES;
1479 struct ttm_mem_type_manager *man;
1480 struct ttm_bo_global *glob = bdev->glob;
1483 man = &bdev->man[i];
1484 if (man->has_type) {
1485 man->use_type = false;
1486 if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1488 printk(KERN_ERR TTM_PFX
1489 "DRM memory manager type %d "
1490 "is not clean.\n", i);
1492 man->has_type = false;
1496 mutex_lock(&glob->device_list_mutex);
1497 list_del(&bdev->device_list);
1498 mutex_unlock(&glob->device_list_mutex);
1500 if (!cancel_delayed_work(&bdev->wq))
1501 flush_scheduled_work();
1503 while (ttm_bo_delayed_delete(bdev, true))
1506 spin_lock(&glob->lru_lock);
1507 if (list_empty(&bdev->ddestroy))
1508 TTM_DEBUG("Delayed destroy list was clean\n");
1510 if (list_empty(&bdev->man[0].lru))
1511 TTM_DEBUG("Swap list was clean\n");
1512 spin_unlock(&glob->lru_lock);
1514 BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1515 write_lock(&bdev->vm_lock);
1516 drm_mm_takedown(&bdev->addr_space_mm);
1517 write_unlock(&bdev->vm_lock);
1521 EXPORT_SYMBOL(ttm_bo_device_release);
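/*
 * ttm_bo_device_init() wires a driver's bo_device into the global state: it
 * initializes the mandatory TTM_PL_SYSTEM memory type, sets up the mmap
 * address space allocator starting at file_page_offset, and arms the
 * delayed-destroy workqueue. A driver typically calls it once at load time,
 * e.g. (illustrative only, the names below are made up):
 *
 *	ret = ttm_bo_device_init(&mydrv->bdev, glob, &mydrv_bo_driver,
 *				 MYDRV_FILE_PAGE_OFFSET, need_dma32);
 */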
1523 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1524 struct ttm_bo_global *glob,
1525 struct ttm_bo_driver *driver,
1526 uint64_t file_page_offset,
1531 rwlock_init(&bdev->vm_lock);
1532 bdev->driver = driver;
1534 memset(bdev->man, 0, sizeof(bdev->man));
1537 * Initialize the system memory buffer type.
1538 * Other types need to be driver / IOCTL initialized.
1540 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1541 if (unlikely(ret != 0))
1544 bdev->addr_space_rb = RB_ROOT;
1545 ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1546 if (unlikely(ret != 0))
1547 goto out_no_addr_mm;
1549 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1550 bdev->nice_mode = true;
1551 INIT_LIST_HEAD(&bdev->ddestroy);
1552 bdev->dev_mapping = NULL;
1554 bdev->need_dma32 = need_dma32;
1556 mutex_lock(&glob->device_list_mutex);
1557 list_add_tail(&bdev->device_list, &glob->device_list);
1558 mutex_unlock(&glob->device_list_mutex);
1562 ttm_bo_clean_mm(bdev, 0);
1566 EXPORT_SYMBOL(ttm_bo_device_init);
1569 * buffer object vm functions.
1572 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1574 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1576 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1577 if (mem->mem_type == TTM_PL_SYSTEM)
1580 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1583 if (mem->placement & TTM_PL_FLAG_CACHED)
1589 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1591 struct ttm_bo_device *bdev = bo->bdev;
1592 loff_t offset = (loff_t) bo->addr_space_offset;
1593 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1595 if (!bdev->dev_mapping)
1597 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1598 ttm_mem_io_free(bdev, &bo->mem);
1600 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1602 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1604 struct ttm_bo_device *bdev = bo->bdev;
1605 struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1606 struct rb_node *parent = NULL;
1607 struct ttm_buffer_object *cur_bo;
1608 unsigned long offset = bo->vm_node->start;
1609 unsigned long cur_offset;
1613 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1614 cur_offset = cur_bo->vm_node->start;
1615 if (offset < cur_offset)
1616 cur = &parent->rb_left;
1617 else if (offset > cur_offset)
1618 cur = &parent->rb_right;
1623 rb_link_node(&bo->vm_rb, parent, cur);
1624 rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1630 * @bo: the buffer to allocate address space for
1632 * Allocate address space in the drm device so that applications
1633 * can mmap the buffer and access the contents. This only
1634 * applies to ttm_bo_type_device objects as others are not
1635 * placed in the drm device address space.
1638 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1640 struct ttm_bo_device *bdev = bo->bdev;
1644 ret = drm_mm_pre_get(&bdev->addr_space_mm);
1645 if (unlikely(ret != 0))
1648 write_lock(&bdev->vm_lock);
1649 bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1650 bo->mem.num_pages, 0, 0);
1652 if (unlikely(bo->vm_node == NULL)) {
1657 bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1658 bo->mem.num_pages, 0);
1660 if (unlikely(bo->vm_node == NULL)) {
1661 write_unlock(&bdev->vm_lock);
1665 ttm_bo_vm_insert_rb(bo);
1666 write_unlock(&bdev->vm_lock);
1667 bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1671 write_unlock(&bdev->vm_lock);
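/*
 * ttm_bo_wait() waits for the buffer's sync object (fence) to signal,
 * dropping bo->lock around the driver wait and re-checking afterwards that
 * the sync object has not been replaced in the meantime.
 */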
1675 int ttm_bo_wait(struct ttm_buffer_object *bo,
1676 bool lazy, bool interruptible, bool no_wait)
1678 struct ttm_bo_driver *driver = bo->bdev->driver;
1683 if (likely(bo->sync_obj == NULL))
1686 while (bo->sync_obj) {
1688 if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
1689 void *tmp_obj = bo->sync_obj;
1690 bo->sync_obj = NULL;
1691 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1692 spin_unlock(&bo->lock);
1693 driver->sync_obj_unref(&tmp_obj);
1694 spin_lock(&bo->lock);
1701 sync_obj = driver->sync_obj_ref(bo->sync_obj);
1702 sync_obj_arg = bo->sync_obj_arg;
1703 spin_unlock(&bo->lock);
1704 ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
1705 lazy, interruptible);
1706 if (unlikely(ret != 0)) {
1707 driver->sync_obj_unref(&sync_obj);
1708 spin_lock(&bo->lock);
1711 spin_lock(&bo->lock);
1712 if (likely(bo->sync_obj == sync_obj &&
1713 bo->sync_obj_arg == sync_obj_arg)) {
1714 void *tmp_obj = bo->sync_obj;
1715 bo->sync_obj = NULL;
1716 clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1718 spin_unlock(&bo->lock);
1719 driver->sync_obj_unref(&sync_obj);
1720 driver->sync_obj_unref(&tmp_obj);
1721 spin_lock(&bo->lock);
1723 spin_unlock(&bo->lock);
1724 driver->sync_obj_unref(&sync_obj);
1725 spin_lock(&bo->lock);
1730 EXPORT_SYMBOL(ttm_bo_wait);
1732 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1737 * Using ttm_bo_reserve makes sure the lru lists are updated.
1740 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1741 if (unlikely(ret != 0))
1743 spin_lock(&bo->lock);
1744 ret = ttm_bo_wait(bo, false, true, no_wait);
1745 spin_unlock(&bo->lock);
1746 if (likely(ret == 0))
1747 atomic_inc(&bo->cpu_writers);
1748 ttm_bo_unreserve(bo);
1751 EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1753 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1755 if (atomic_dec_and_test(&bo->cpu_writers))
1756 wake_up_all(&bo->event_queue);
1758 EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
1761 * A buffer object shrink method that tries to swap out the first
1762 * buffer object on the bo_global::swap_lru list.
1765 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1767 struct ttm_bo_global *glob =
1768 container_of(shrink, struct ttm_bo_global, shrink);
1769 struct ttm_buffer_object *bo;
1772 uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1774 spin_lock(&glob->lru_lock);
1775 while (ret == -EBUSY) {
1776 if (unlikely(list_empty(&glob->swap_lru))) {
1777 spin_unlock(&glob->lru_lock);
1781 bo = list_first_entry(&glob->swap_lru,
1782 struct ttm_buffer_object, swap);
1783 kref_get(&bo->list_kref);
1785 if (!list_empty(&bo->ddestroy)) {
1786 spin_unlock(&glob->lru_lock);
1787 (void) ttm_bo_cleanup_refs(bo, false, false, false);
1788 kref_put(&bo->list_kref, ttm_bo_release_list);
1793 * Reserve buffer. Since we unlock while sleeping, we need
1794 * to re-check that nobody removed us from the swap-list while we slept.
1798 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1799 if (unlikely(ret == -EBUSY)) {
1800 spin_unlock(&glob->lru_lock);
1801 ttm_bo_wait_unreserved(bo, false);
1802 kref_put(&bo->list_kref, ttm_bo_release_list);
1803 spin_lock(&glob->lru_lock);
1808 put_count = ttm_bo_del_from_lru(bo);
1809 spin_unlock(&glob->lru_lock);
1812 kref_put(&bo->list_kref, ttm_bo_ref_bug);
1815 * Wait for GPU, then move to system cached.
1818 spin_lock(&bo->lock);
1819 ret = ttm_bo_wait(bo, false, false, false);
1820 spin_unlock(&bo->lock);
1822 if (unlikely(ret != 0))
1825 if ((bo->mem.placement & swap_placement) != swap_placement) {
1826 struct ttm_mem_reg evict_mem;
1828 evict_mem = bo->mem;
1829 evict_mem.mm_node = NULL;
1830 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1831 evict_mem.mem_type = TTM_PL_SYSTEM;
1833 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1834 false, false, false);
1835 if (unlikely(ret != 0))
1839 ttm_bo_unmap_virtual(bo);
1842 * Swap out. Buffer will be swapped in again as soon as
1843 * anyone tries to access a ttm page.
1846 if (bo->bdev->driver->swap_notify)
1847 bo->bdev->driver->swap_notify(bo);
1849 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
1854 * Unreserve without putting on the LRU, to avoid swapping out an
1855 * already swapped-out buffer.
1858 atomic_set(&bo->reserved, 0);
1859 wake_up_all(&bo->event_queue);
1860 kref_put(&bo->list_kref, ttm_bo_release_list);
1864 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1866 while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1869 EXPORT_SYMBOL(ttm_bo_swapout_all);