2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * Eric Anholt <eric@anholt.net>
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/slab.h>
35 #include <linux/swap.h>
36 #include <linux/pci.h>
37 #include <linux/intel-gtt.h>
39 static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
40 static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
41 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
42 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
43 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
45 static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
48 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
49 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
50 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
52 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
53 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
54 struct drm_i915_gem_pwrite *args,
55 struct drm_file *file_priv);
56 static void i915_gem_free_object_tail(struct drm_gem_object *obj);
58 static LIST_HEAD(shrink_list);
59 static DEFINE_SPINLOCK(shrink_list_lock);
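/* A buffer is "inactive" when it is bound in the GTT, no longer referenced
 * by the GPU and not pinned; callers use this predicate before moving an
 * object onto the inactive LRU.
 */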
62 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
64 return obj_priv->gtt_space &&
66 obj_priv->pin_count == 0;
69 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
72 drm_i915_private_t *dev_priv = dev->dev_private;
75 (start & (PAGE_SIZE - 1)) != 0 ||
76 (end & (PAGE_SIZE - 1)) != 0) {
80 drm_mm_init(&dev_priv->mm.gtt_space, start,
83 dev->gtt_total = (uint32_t) (end - start);
89 i915_gem_init_ioctl(struct drm_device *dev, void *data,
90 struct drm_file *file_priv)
92 struct drm_i915_gem_init *args = data;
95 mutex_lock(&dev->struct_mutex);
96 ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
97 mutex_unlock(&dev->struct_mutex);
103 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
104 struct drm_file *file_priv)
106 struct drm_i915_gem_get_aperture *args = data;
108 if (!(dev->driver->driver_features & DRIVER_GEM))
111 args->aper_size = dev->gtt_total;
112 args->aper_available_size = (args->aper_size -
113 atomic_read(&dev->pin_memory));
120 * Creates a new mm object and returns a handle to it.
123 i915_gem_create_ioctl(struct drm_device *dev, void *data,
124 struct drm_file *file_priv)
126 struct drm_i915_gem_create *args = data;
127 struct drm_gem_object *obj;
131 args->size = roundup(args->size, PAGE_SIZE);
133 /* Allocate the new object */
134 obj = i915_gem_alloc_object(dev, args->size);
138 ret = drm_gem_handle_create(file_priv, obj, &handle);
140 drm_gem_object_unreference_unlocked(obj);
144 /* Sink the floating reference from kref_init(handlecount) */
145 drm_gem_object_handle_unreference_unlocked(obj);
147 args->handle = handle;
152 fast_shmem_read(struct page **pages,
153 loff_t page_base, int page_offset,
160 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
163 unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
164 kunmap_atomic(vaddr, KM_USER0);
172 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
174 drm_i915_private_t *dev_priv = obj->dev->dev_private;
175 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
177 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
178 obj_priv->tiling_mode != I915_TILING_NONE;
182 slow_shmem_copy(struct page *dst_page,
184 struct page *src_page,
188 char *dst_vaddr, *src_vaddr;
190 dst_vaddr = kmap(dst_page);
191 src_vaddr = kmap(src_page);
193 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
200 slow_shmem_bit17_copy(struct page *gpu_page,
202 struct page *cpu_page,
207 char *gpu_vaddr, *cpu_vaddr;
209 /* Use the unswizzled path if this page isn't affected. */
210 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
212 return slow_shmem_copy(cpu_page, cpu_offset,
213 gpu_page, gpu_offset, length);
215 return slow_shmem_copy(gpu_page, gpu_offset,
216 cpu_page, cpu_offset, length);
219 gpu_vaddr = kmap(gpu_page);
220 cpu_vaddr = kmap(cpu_page);
222 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
223 * XORing with the other bits (A9 for Y, A9 and A10 for X)
226 int cacheline_end = ALIGN(gpu_offset + 1, 64);
227 int this_length = min(cacheline_end - gpu_offset, length);
228 int swizzled_gpu_offset = gpu_offset ^ 64;
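/* 64 is 1 << 6, so the XOR above flips address bit 6, swapping the two
 * 64-byte halves of each 128-byte span to undo the bit-17 based swizzle.
 * Limiting each chunk to a cacheline boundary keeps every memcpy inside a
 * single swizzled half.
 */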
231 memcpy(cpu_vaddr + cpu_offset,
232 gpu_vaddr + swizzled_gpu_offset,
235 memcpy(gpu_vaddr + swizzled_gpu_offset,
236 cpu_vaddr + cpu_offset,
239 cpu_offset += this_length;
240 gpu_offset += this_length;
241 length -= this_length;
* This is the fast shmem pread path, which attempts to copy_to_user directly
* from the backing pages of the object to the user's address space. On a
* fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
254 i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
255 struct drm_i915_gem_pread *args,
256 struct drm_file *file_priv)
258 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
260 loff_t offset, page_base;
261 char __user *user_data;
262 int page_offset, page_length;
265 user_data = (char __user *) (uintptr_t) args->data_ptr;
268 mutex_lock(&dev->struct_mutex);
270 ret = i915_gem_object_get_pages(obj, 0);
274 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
279 obj_priv = to_intel_bo(obj);
280 offset = args->offset;
283 /* Operation in this page
* page_base = page offset within the object's backing store
286 * page_offset = offset within page
287 * page_length = bytes to copy for this page
289 page_base = (offset & ~(PAGE_SIZE-1));
290 page_offset = offset & (PAGE_SIZE-1);
291 page_length = remain;
292 if ((page_offset + remain) > PAGE_SIZE)
293 page_length = PAGE_SIZE - page_offset;
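/* Clamp the copy so that it never crosses the end of the current page;
 * whatever is left over is handled on the next pass through the loop.
 */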
295 ret = fast_shmem_read(obj_priv->pages,
296 page_base, page_offset,
297 user_data, page_length);
301 remain -= page_length;
302 user_data += page_length;
303 offset += page_length;
307 i915_gem_object_put_pages(obj);
309 mutex_unlock(&dev->struct_mutex);
315 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
319 ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
321 /* If we've insufficient memory to map in the pages, attempt
322 * to make some space by throwing out some old buffers.
324 if (ret == -ENOMEM) {
325 struct drm_device *dev = obj->dev;
327 ret = i915_gem_evict_something(dev, obj->size,
328 i915_gem_get_gtt_alignment(obj));
332 ret = i915_gem_object_get_pages(obj, 0);
339 * This is the fallback shmem pread path, which allocates temporary storage
340 * in kernel space to copy_to_user into outside of the struct_mutex, so we
341 * can copy out of the object's backing pages while holding the struct mutex
342 * and not take page faults.
345 i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
346 struct drm_i915_gem_pread *args,
347 struct drm_file *file_priv)
349 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
350 struct mm_struct *mm = current->mm;
351 struct page **user_pages;
353 loff_t offset, pinned_pages, i;
354 loff_t first_data_page, last_data_page, num_pages;
355 int shmem_page_index, shmem_page_offset;
356 int data_page_index, data_page_offset;
359 uint64_t data_ptr = args->data_ptr;
360 int do_bit17_swizzling;
364 /* Pin the user pages containing the data. We can't fault while
365 * holding the struct mutex, yet we want to hold it while
366 * dereferencing the user data.
368 first_data_page = data_ptr / PAGE_SIZE;
369 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
370 num_pages = last_data_page - first_data_page + 1;
372 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
373 if (user_pages == NULL)
376 down_read(&mm->mmap_sem);
377 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
378 num_pages, 1, 0, user_pages, NULL);
379 up_read(&mm->mmap_sem);
380 if (pinned_pages < num_pages) {
382 goto fail_put_user_pages;
385 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
387 mutex_lock(&dev->struct_mutex);
389 ret = i915_gem_object_get_pages_or_evict(obj);
393 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
398 obj_priv = to_intel_bo(obj);
399 offset = args->offset;
402 /* Operation in this page
404 * shmem_page_index = page number within shmem file
405 * shmem_page_offset = offset within page in shmem file
406 * data_page_index = page number in get_user_pages return
* data_page_offset = offset within the data_page_index page.
408 * page_length = bytes to copy for this page
410 shmem_page_index = offset / PAGE_SIZE;
411 shmem_page_offset = offset & ~PAGE_MASK;
412 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
413 data_page_offset = data_ptr & ~PAGE_MASK;
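/* The two indices address different arrays: shmem_page_index picks the
 * object's backing page, while data_page_index picks the pinned user page
 * (hence the subtraction of first_data_page).
 */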
415 page_length = remain;
416 if ((shmem_page_offset + page_length) > PAGE_SIZE)
417 page_length = PAGE_SIZE - shmem_page_offset;
418 if ((data_page_offset + page_length) > PAGE_SIZE)
419 page_length = PAGE_SIZE - data_page_offset;
421 if (do_bit17_swizzling) {
422 slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
424 user_pages[data_page_index],
429 slow_shmem_copy(user_pages[data_page_index],
431 obj_priv->pages[shmem_page_index],
436 remain -= page_length;
437 data_ptr += page_length;
438 offset += page_length;
442 i915_gem_object_put_pages(obj);
444 mutex_unlock(&dev->struct_mutex);
446 for (i = 0; i < pinned_pages; i++) {
447 SetPageDirty(user_pages[i]);
448 page_cache_release(user_pages[i]);
450 drm_free_large(user_pages);
456 * Reads data from the object referenced by handle.
458 * On error, the contents of *data are undefined.
461 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
462 struct drm_file *file_priv)
464 struct drm_i915_gem_pread *args = data;
465 struct drm_gem_object *obj;
466 struct drm_i915_gem_object *obj_priv;
469 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
472 obj_priv = to_intel_bo(obj);
474 /* Bounds check source.
476 * XXX: This could use review for overflow issues...
478 if (args->offset > obj->size || args->size > obj->size ||
479 args->offset + args->size > obj->size) {
484 if (!access_ok(VERIFY_WRITE,
485 (char __user *)(uintptr_t)args->data_ptr,
491 if (i915_gem_object_needs_bit17_swizzle(obj)) {
492 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
494 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
496 ret = i915_gem_shmem_pread_slow(dev, obj, args,
501 drm_gem_object_unreference_unlocked(obj);
505 /* This is the fast write path which cannot handle
506 * page faults in the source data
510 fast_user_write(struct io_mapping *mapping,
511 loff_t page_base, int page_offset,
512 char __user *user_data,
516 unsigned long unwritten;
518 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
519 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
521 io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
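/* The aperture page was mapped write-combining and the copy bypasses the
 * CPU cache; since we are in atomic context a fault cannot be serviced
 * here, so any non-zero 'unwritten' count makes this path bail out and the
 * caller falls back to the sleeping slow path.
 */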
527 /* Here's the write path which can sleep for
532 slow_kernel_write(struct io_mapping *mapping,
533 loff_t gtt_base, int gtt_offset,
534 struct page *user_page, int user_offset,
537 char __iomem *dst_vaddr;
540 dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
541 src_vaddr = kmap(user_page);
543 memcpy_toio(dst_vaddr + gtt_offset,
544 src_vaddr + user_offset,
548 io_mapping_unmap(dst_vaddr);
552 fast_shmem_write(struct page **pages,
553 loff_t page_base, int page_offset,
558 unsigned long unwritten;
560 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
563 unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
564 kunmap_atomic(vaddr, KM_USER0);
572 * This is the fast pwrite path, where we copy the data directly from the
573 * user into the GTT, uncached.
576 i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
577 struct drm_i915_gem_pwrite *args,
578 struct drm_file *file_priv)
580 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
581 drm_i915_private_t *dev_priv = dev->dev_private;
583 loff_t offset, page_base;
584 char __user *user_data;
585 int page_offset, page_length;
588 user_data = (char __user *) (uintptr_t) args->data_ptr;
592 mutex_lock(&dev->struct_mutex);
593 ret = i915_gem_object_pin(obj, 0);
595 mutex_unlock(&dev->struct_mutex);
598 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
602 obj_priv = to_intel_bo(obj);
603 offset = obj_priv->gtt_offset + args->offset;
606 /* Operation in this page
608 * page_base = page offset within aperture
609 * page_offset = offset within page
610 * page_length = bytes to copy for this page
612 page_base = (offset & ~(PAGE_SIZE-1));
613 page_offset = offset & (PAGE_SIZE-1);
614 page_length = remain;
615 if ((page_offset + remain) > PAGE_SIZE)
616 page_length = PAGE_SIZE - page_offset;
618 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
619 page_offset, user_data, page_length);
621 /* If we get a fault while copying data, then (presumably) our
622 * source page isn't available. Return the error and we'll
623 * retry in the slow path.
628 remain -= page_length;
629 user_data += page_length;
630 offset += page_length;
634 i915_gem_object_unpin(obj);
635 mutex_unlock(&dev->struct_mutex);
641 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
642 * the memory and maps it using kmap_atomic for copying.
644 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
645 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
648 i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
649 struct drm_i915_gem_pwrite *args,
650 struct drm_file *file_priv)
652 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
653 drm_i915_private_t *dev_priv = dev->dev_private;
655 loff_t gtt_page_base, offset;
656 loff_t first_data_page, last_data_page, num_pages;
657 loff_t pinned_pages, i;
658 struct page **user_pages;
659 struct mm_struct *mm = current->mm;
660 int gtt_page_offset, data_page_offset, data_page_index, page_length;
662 uint64_t data_ptr = args->data_ptr;
666 /* Pin the user pages containing the data. We can't fault while
667 * holding the struct mutex, and all of the pwrite implementations
668 * want to hold it while dereferencing the user data.
670 first_data_page = data_ptr / PAGE_SIZE;
671 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
672 num_pages = last_data_page - first_data_page + 1;
674 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
675 if (user_pages == NULL)
678 down_read(&mm->mmap_sem);
679 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
680 num_pages, 0, 0, user_pages, NULL);
681 up_read(&mm->mmap_sem);
682 if (pinned_pages < num_pages) {
684 goto out_unpin_pages;
687 mutex_lock(&dev->struct_mutex);
688 ret = i915_gem_object_pin(obj, 0);
692 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
694 goto out_unpin_object;
696 obj_priv = to_intel_bo(obj);
697 offset = obj_priv->gtt_offset + args->offset;
700 /* Operation in this page
702 * gtt_page_base = page offset within aperture
703 * gtt_page_offset = offset within page in aperture
704 * data_page_index = page number in get_user_pages return
* data_page_offset = offset within the data_page_index page.
706 * page_length = bytes to copy for this page
708 gtt_page_base = offset & PAGE_MASK;
709 gtt_page_offset = offset & ~PAGE_MASK;
710 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
711 data_page_offset = data_ptr & ~PAGE_MASK;
713 page_length = remain;
714 if ((gtt_page_offset + page_length) > PAGE_SIZE)
715 page_length = PAGE_SIZE - gtt_page_offset;
716 if ((data_page_offset + page_length) > PAGE_SIZE)
717 page_length = PAGE_SIZE - data_page_offset;
719 slow_kernel_write(dev_priv->mm.gtt_mapping,
720 gtt_page_base, gtt_page_offset,
721 user_pages[data_page_index],
725 remain -= page_length;
726 offset += page_length;
727 data_ptr += page_length;
731 i915_gem_object_unpin(obj);
733 mutex_unlock(&dev->struct_mutex);
735 for (i = 0; i < pinned_pages; i++)
736 page_cache_release(user_pages[i]);
737 drm_free_large(user_pages);
743 * This is the fast shmem pwrite path, which attempts to directly
744 * copy_from_user into the kmapped pages backing the object.
747 i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
748 struct drm_i915_gem_pwrite *args,
749 struct drm_file *file_priv)
751 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
753 loff_t offset, page_base;
754 char __user *user_data;
755 int page_offset, page_length;
758 user_data = (char __user *) (uintptr_t) args->data_ptr;
761 mutex_lock(&dev->struct_mutex);
763 ret = i915_gem_object_get_pages(obj, 0);
767 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
771 obj_priv = to_intel_bo(obj);
772 offset = args->offset;
776 /* Operation in this page
* page_base = page offset within the object's backing store
779 * page_offset = offset within page
780 * page_length = bytes to copy for this page
782 page_base = (offset & ~(PAGE_SIZE-1));
783 page_offset = offset & (PAGE_SIZE-1);
784 page_length = remain;
785 if ((page_offset + remain) > PAGE_SIZE)
786 page_length = PAGE_SIZE - page_offset;
788 ret = fast_shmem_write(obj_priv->pages,
789 page_base, page_offset,
790 user_data, page_length);
794 remain -= page_length;
795 user_data += page_length;
796 offset += page_length;
800 i915_gem_object_put_pages(obj);
802 mutex_unlock(&dev->struct_mutex);
808 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
809 * the memory and maps it using kmap_atomic for copying.
811 * This avoids taking mmap_sem for faulting on the user's address while the
812 * struct_mutex is held.
815 i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
816 struct drm_i915_gem_pwrite *args,
817 struct drm_file *file_priv)
819 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
820 struct mm_struct *mm = current->mm;
821 struct page **user_pages;
823 loff_t offset, pinned_pages, i;
824 loff_t first_data_page, last_data_page, num_pages;
825 int shmem_page_index, shmem_page_offset;
826 int data_page_index, data_page_offset;
829 uint64_t data_ptr = args->data_ptr;
830 int do_bit17_swizzling;
834 /* Pin the user pages containing the data. We can't fault while
835 * holding the struct mutex, and all of the pwrite implementations
836 * want to hold it while dereferencing the user data.
838 first_data_page = data_ptr / PAGE_SIZE;
839 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
840 num_pages = last_data_page - first_data_page + 1;
842 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
843 if (user_pages == NULL)
846 down_read(&mm->mmap_sem);
847 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
848 num_pages, 0, 0, user_pages, NULL);
849 up_read(&mm->mmap_sem);
850 if (pinned_pages < num_pages) {
852 goto fail_put_user_pages;
855 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
857 mutex_lock(&dev->struct_mutex);
859 ret = i915_gem_object_get_pages_or_evict(obj);
863 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
867 obj_priv = to_intel_bo(obj);
868 offset = args->offset;
872 /* Operation in this page
874 * shmem_page_index = page number within shmem file
875 * shmem_page_offset = offset within page in shmem file
876 * data_page_index = page number in get_user_pages return
* data_page_offset = offset within the data_page_index page.
878 * page_length = bytes to copy for this page
880 shmem_page_index = offset / PAGE_SIZE;
881 shmem_page_offset = offset & ~PAGE_MASK;
882 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
883 data_page_offset = data_ptr & ~PAGE_MASK;
885 page_length = remain;
886 if ((shmem_page_offset + page_length) > PAGE_SIZE)
887 page_length = PAGE_SIZE - shmem_page_offset;
888 if ((data_page_offset + page_length) > PAGE_SIZE)
889 page_length = PAGE_SIZE - data_page_offset;
891 if (do_bit17_swizzling) {
892 slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
894 user_pages[data_page_index],
899 slow_shmem_copy(obj_priv->pages[shmem_page_index],
901 user_pages[data_page_index],
906 remain -= page_length;
907 data_ptr += page_length;
908 offset += page_length;
912 i915_gem_object_put_pages(obj);
914 mutex_unlock(&dev->struct_mutex);
916 for (i = 0; i < pinned_pages; i++)
917 page_cache_release(user_pages[i]);
918 drm_free_large(user_pages);
924 * Writes data to the object referenced by handle.
926 * On error, the contents of the buffer that were to be modified are undefined.
929 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
930 struct drm_file *file_priv)
932 struct drm_i915_gem_pwrite *args = data;
933 struct drm_gem_object *obj;
934 struct drm_i915_gem_object *obj_priv;
937 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
940 obj_priv = to_intel_bo(obj);
942 /* Bounds check destination.
944 * XXX: This could use review for overflow issues...
946 if (args->offset > obj->size || args->size > obj->size ||
947 args->offset + args->size > obj->size) {
952 if (!access_ok(VERIFY_READ,
953 (char __user *)(uintptr_t)args->data_ptr,
959 /* We can only do the GTT pwrite on untiled buffers, as otherwise
960 * it would end up going through the fenced access, and we'll get
961 * different detiling behavior between reading and writing.
962 * pread/pwrite currently are reading and writing from the CPU
963 * perspective, requiring manual detiling by the client.
965 if (obj_priv->phys_obj)
966 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
967 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
968 dev->gtt_total != 0 &&
969 obj->write_domain != I915_GEM_DOMAIN_CPU) {
970 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
971 if (ret == -EFAULT) {
972 ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
975 } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
976 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
978 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
979 if (ret == -EFAULT) {
980 ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
987 DRM_INFO("pwrite failed %d\n", ret);
991 drm_gem_object_unreference_unlocked(obj);
996 * Called when user space prepares to use an object with the CPU, either
997 * through the mmap ioctl's mapping or a GTT mapping.
1000 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1001 struct drm_file *file_priv)
1003 struct drm_i915_private *dev_priv = dev->dev_private;
1004 struct drm_i915_gem_set_domain *args = data;
1005 struct drm_gem_object *obj;
1006 struct drm_i915_gem_object *obj_priv;
1007 uint32_t read_domains = args->read_domains;
1008 uint32_t write_domain = args->write_domain;
1011 if (!(dev->driver->driver_features & DRIVER_GEM))
1014 /* Only handle setting domains to types used by the CPU. */
1015 if (write_domain & I915_GEM_GPU_DOMAINS)
1018 if (read_domains & I915_GEM_GPU_DOMAINS)
1021 /* Having something in the write domain implies it's in the read
1022 * domain, and only that read domain. Enforce that in the request.
1024 if (write_domain != 0 && read_domains != write_domain)
1027 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1030 obj_priv = to_intel_bo(obj);
1032 mutex_lock(&dev->struct_mutex);
1034 intel_mark_busy(dev, obj);
1037 DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
1038 obj, obj->size, read_domains, write_domain);
1040 if (read_domains & I915_GEM_DOMAIN_GTT) {
1041 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
/* Update the LRU on the fence for the CPU access that's about to occur. */
1046 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1047 struct drm_i915_fence_reg *reg =
1048 &dev_priv->fence_regs[obj_priv->fence_reg];
1049 list_move_tail(®->lru_list,
1050 &dev_priv->mm.fence_list);
1053 /* Silently promote "you're not bound, there was nothing to do"
1054 * to success, since the client was just asking us to
1055 * make sure everything was done.
1060 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1064 /* Maintain LRU order of "inactive" objects */
1065 if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
1066 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1068 drm_gem_object_unreference(obj);
1069 mutex_unlock(&dev->struct_mutex);
1074 * Called when user space has done writes to this buffer
1077 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1078 struct drm_file *file_priv)
1080 struct drm_i915_gem_sw_finish *args = data;
1081 struct drm_gem_object *obj;
1082 struct drm_i915_gem_object *obj_priv;
1085 if (!(dev->driver->driver_features & DRIVER_GEM))
1088 mutex_lock(&dev->struct_mutex);
1089 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1091 mutex_unlock(&dev->struct_mutex);
1096 DRM_INFO("%s: sw_finish %d (%p %zd)\n",
1097 __func__, args->handle, obj, obj->size);
1099 obj_priv = to_intel_bo(obj);
1101 /* Pinned buffers may be scanout, so flush the cache */
1102 if (obj_priv->pin_count)
1103 i915_gem_object_flush_cpu_write_domain(obj);
1105 drm_gem_object_unreference(obj);
1106 mutex_unlock(&dev->struct_mutex);
* Maps the contents of an object, returning the address it is mapped into.
1114 * While the mapping holds a reference on the contents of the object, it doesn't
1115 * imply a ref on the object itself.
1118 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1119 struct drm_file *file_priv)
1121 struct drm_i915_gem_mmap *args = data;
1122 struct drm_gem_object *obj;
1126 if (!(dev->driver->driver_features & DRIVER_GEM))
1129 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1133 offset = args->offset;
1135 down_write(¤t->mm->mmap_sem);
1136 addr = do_mmap(obj->filp, 0, args->size,
1137 PROT_READ | PROT_WRITE, MAP_SHARED,
1139 up_write(¤t->mm->mmap_sem);
1140 drm_gem_object_unreference_unlocked(obj);
1141 if (IS_ERR((void *)addr))
1144 args->addr_ptr = (uint64_t) addr;
1150 * i915_gem_fault - fault a page into the GTT
1151 * vma: VMA in question
* The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1155 * from userspace. The fault handler takes care of binding the object to
1156 * the GTT (if needed), allocating and programming a fence register (again,
1157 * only if needed based on whether the old reg is still valid or the object
1158 * is tiled) and inserting a new PTE into the faulting process.
1160 * Note that the faulting process may involve evicting existing objects
1161 * from the GTT and/or fence registers to make room. So performance may
* suffer if the GTT working set is large or there are few fence registers left.
1165 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1167 struct drm_gem_object *obj = vma->vm_private_data;
1168 struct drm_device *dev = obj->dev;
1169 drm_i915_private_t *dev_priv = dev->dev_private;
1170 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1171 pgoff_t page_offset;
1174 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1176 /* We don't use vmf->pgoff since that has the fake offset */
1177 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1180 /* Now bind it into the GTT if needed */
1181 mutex_lock(&dev->struct_mutex);
1182 if (!obj_priv->gtt_space) {
1183 ret = i915_gem_object_bind_to_gtt(obj, 0);
1187 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1192 /* Need a new fence register? */
1193 if (obj_priv->tiling_mode != I915_TILING_NONE) {
1194 ret = i915_gem_object_get_fence_reg(obj);
1199 if (i915_gem_object_is_inactive(obj_priv))
1200 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1202 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1205 /* Finally, remap it using the new GTT offset */
1206 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
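/* The inserted PTE points directly at the GTT aperture, so once this fault
 * is serviced the process accesses the object through the aperture without
 * further faults, until the mapping is revoked by i915_gem_release_mmap().
 */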
1208 mutex_unlock(&dev->struct_mutex);
1213 return VM_FAULT_NOPAGE;
1216 return VM_FAULT_OOM;
1218 return VM_FAULT_SIGBUS;
1223 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1224 * @obj: obj in question
1226 * GEM memory mapping works by handing back to userspace a fake mmap offset
1227 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1228 * up the object based on the offset and sets up the various memory mapping
1231 * This routine allocates and attaches a fake offset for @obj.
1234 i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1236 struct drm_device *dev = obj->dev;
1237 struct drm_gem_mm *mm = dev->mm_private;
1238 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1239 struct drm_map_list *list;
1240 struct drm_local_map *map;
1243 /* Set the object up for mmap'ing */
1244 list = &obj->map_list;
1245 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
1250 map->type = _DRM_GEM;
1251 map->size = obj->size;
1254 /* Get a DRM GEM mmap offset allocated... */
1255 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1256 obj->size / PAGE_SIZE, 0, 0);
1257 if (!list->file_offset_node) {
1258 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
1263 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1264 obj->size / PAGE_SIZE, 0);
1265 if (!list->file_offset_node) {
1270 list->hash.key = list->file_offset_node->start;
1271 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
1272 DRM_ERROR("failed to add to map hash\n");
1277 /* By now we should be all set, any drm_mmap request on the offset
1278 * below will get to our mmap & fault handler */
1279 obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
1284 drm_mm_put_block(list->file_offset_node);
1292 * i915_gem_release_mmap - remove physical page mappings
1293 * @obj: obj in question
1295 * Preserve the reservation of the mmapping with the DRM core code, but
1296 * relinquish ownership of the pages back to the system.
1298 * It is vital that we remove the page mapping if we have mapped a tiled
1299 * object through the GTT and then lose the fence register due to
1300 * resource pressure. Similarly if the object has been moved out of the
* aperture, then pages mapped into userspace must be revoked. Removing the
1302 * mapping will then trigger a page fault on the next user access, allowing
1303 * fixup by i915_gem_fault().
1306 i915_gem_release_mmap(struct drm_gem_object *obj)
1308 struct drm_device *dev = obj->dev;
1309 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1311 if (dev->dev_mapping)
1312 unmap_mapping_range(dev->dev_mapping,
1313 obj_priv->mmap_offset, obj->size, 1);
1317 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1319 struct drm_device *dev = obj->dev;
1320 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1321 struct drm_gem_mm *mm = dev->mm_private;
1322 struct drm_map_list *list;
1324 list = &obj->map_list;
1325 drm_ht_remove_item(&mm->offset_hash, &list->hash);
1327 if (list->file_offset_node) {
1328 drm_mm_put_block(list->file_offset_node);
1329 list->file_offset_node = NULL;
1337 obj_priv->mmap_offset = 0;
1341 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1342 * @obj: object to check
1344 * Return the required GTT alignment for an object, taking into account
1345 * potential fence register mapping if needed.
1348 i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1350 struct drm_device *dev = obj->dev;
1351 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1355 * Minimum alignment is 4k (GTT page size), but might be greater
1356 * if a fence register is needed for the object.
1358 if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
1362 * Previous chips need to be aligned to the size of the smallest
1363 * fence register that can contain the object.
1370 for (i = start; i < obj->size; i <<= 1)
1377 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1379 * @data: GTT mapping ioctl data
1380 * @file_priv: GEM object info
1382 * Simply returns the fake offset to userspace so it can mmap it.
1383 * The mmap call will end up in drm_gem_mmap(), which will set things
1384 * up so we can get faults in the handler above.
1386 * The fault handler will take care of binding the object into the GTT
1387 * (since it may have been evicted to make room for something), allocating
* a fence register, and mapping the appropriate aperture address into userspace.
1392 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1393 struct drm_file *file_priv)
1395 struct drm_i915_gem_mmap_gtt *args = data;
1396 struct drm_gem_object *obj;
1397 struct drm_i915_gem_object *obj_priv;
1400 if (!(dev->driver->driver_features & DRIVER_GEM))
1403 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1407 mutex_lock(&dev->struct_mutex);
1409 obj_priv = to_intel_bo(obj);
1411 if (obj_priv->madv != I915_MADV_WILLNEED) {
1412 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1413 drm_gem_object_unreference(obj);
1414 mutex_unlock(&dev->struct_mutex);
1419 if (!obj_priv->mmap_offset) {
1420 ret = i915_gem_create_mmap_offset(obj);
1422 drm_gem_object_unreference(obj);
1423 mutex_unlock(&dev->struct_mutex);
1428 args->offset = obj_priv->mmap_offset;
1431 * Pull it into the GTT so that we have a page list (makes the
1432 * initial fault faster and any subsequent flushing possible).
1434 if (!obj_priv->agp_mem) {
1435 ret = i915_gem_object_bind_to_gtt(obj, 0);
1437 drm_gem_object_unreference(obj);
1438 mutex_unlock(&dev->struct_mutex);
1443 drm_gem_object_unreference(obj);
1444 mutex_unlock(&dev->struct_mutex);
1450 i915_gem_object_put_pages(struct drm_gem_object *obj)
1452 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1453 int page_count = obj->size / PAGE_SIZE;
1456 BUG_ON(obj_priv->pages_refcount == 0);
1457 BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
1459 if (--obj_priv->pages_refcount != 0)
1462 if (obj_priv->tiling_mode != I915_TILING_NONE)
1463 i915_gem_object_save_bit_17_swizzle(obj);
1465 if (obj_priv->madv == I915_MADV_DONTNEED)
1466 obj_priv->dirty = 0;
1468 for (i = 0; i < page_count; i++) {
1469 if (obj_priv->dirty)
1470 set_page_dirty(obj_priv->pages[i]);
1472 if (obj_priv->madv == I915_MADV_WILLNEED)
1473 mark_page_accessed(obj_priv->pages[i]);
1475 page_cache_release(obj_priv->pages[i]);
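/* Dirty pages are handed back to the VM marked as such so they can be
 * written out, and pages the object still wants are marked accessed so the
 * VM is less likely to reclaim them straight away.
 */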
1477 obj_priv->dirty = 0;
1479 drm_free_large(obj_priv->pages);
1480 obj_priv->pages = NULL;
1484 i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
1485 struct intel_ring_buffer *ring)
1487 struct drm_device *dev = obj->dev;
1488 drm_i915_private_t *dev_priv = dev->dev_private;
1489 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1490 BUG_ON(ring == NULL);
1491 obj_priv->ring = ring;
1493 /* Add a reference if we're newly entering the active list. */
1494 if (!obj_priv->active) {
1495 drm_gem_object_reference(obj);
1496 obj_priv->active = 1;
1498 /* Move from whatever list we were on to the tail of execution. */
1499 spin_lock(&dev_priv->mm.active_list_lock);
1500 list_move_tail(&obj_priv->list, &ring->active_list);
1501 spin_unlock(&dev_priv->mm.active_list_lock);
1502 obj_priv->last_rendering_seqno = seqno;
1506 i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1508 struct drm_device *dev = obj->dev;
1509 drm_i915_private_t *dev_priv = dev->dev_private;
1510 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1512 BUG_ON(!obj_priv->active);
1513 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1514 obj_priv->last_rendering_seqno = 0;
1517 /* Immediately discard the backing storage */
1519 i915_gem_object_truncate(struct drm_gem_object *obj)
1521 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1522 struct inode *inode;
1524 /* Our goal here is to return as much of the memory as
1525 * is possible back to the system as we are called from OOM.
1526 * To do this we must instruct the shmfs to drop all of its
1527 * backing pages, *now*. Here we mirror the actions taken
* by shmem_delete_inode() to release the backing store.
1530 inode = obj->filp->f_path.dentry->d_inode;
1531 truncate_inode_pages(inode->i_mapping, 0);
1532 if (inode->i_op->truncate_range)
1533 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
1535 obj_priv->madv = __I915_MADV_PURGED;
1539 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
1541 return obj_priv->madv == I915_MADV_DONTNEED;
1545 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1547 struct drm_device *dev = obj->dev;
1548 drm_i915_private_t *dev_priv = dev->dev_private;
1549 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1551 i915_verify_inactive(dev, __FILE__, __LINE__);
1552 if (obj_priv->pin_count != 0)
1553 list_del_init(&obj_priv->list);
1555 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1557 BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1559 obj_priv->last_rendering_seqno = 0;
1560 obj_priv->ring = NULL;
1561 if (obj_priv->active) {
1562 obj_priv->active = 0;
1563 drm_gem_object_unreference(obj);
1565 i915_verify_inactive(dev, __FILE__, __LINE__);
1569 i915_gem_process_flushing_list(struct drm_device *dev,
1570 uint32_t flush_domains, uint32_t seqno,
1571 struct intel_ring_buffer *ring)
1573 drm_i915_private_t *dev_priv = dev->dev_private;
1574 struct drm_i915_gem_object *obj_priv, *next;
1576 list_for_each_entry_safe(obj_priv, next,
1577 &dev_priv->mm.gpu_write_list,
1579 struct drm_gem_object *obj = &obj_priv->base;
1581 if ((obj->write_domain & flush_domains) ==
1582 obj->write_domain &&
1583 obj_priv->ring->ring_flag == ring->ring_flag) {
1584 uint32_t old_write_domain = obj->write_domain;
1586 obj->write_domain = 0;
1587 list_del_init(&obj_priv->gpu_write_list);
1588 i915_gem_object_move_to_active(obj, seqno, ring);
1590 /* update the fence lru list */
1591 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1592 struct drm_i915_fence_reg *reg =
1593 &dev_priv->fence_regs[obj_priv->fence_reg];
1594 list_move_tail(®->lru_list,
1595 &dev_priv->mm.fence_list);
1598 trace_i915_gem_object_change_domain(obj,
1606 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1607 uint32_t flush_domains, struct intel_ring_buffer *ring)
1609 drm_i915_private_t *dev_priv = dev->dev_private;
1610 struct drm_i915_file_private *i915_file_priv = NULL;
1611 struct drm_i915_gem_request *request;
1615 if (file_priv != NULL)
1616 i915_file_priv = file_priv->driver_priv;
1618 request = kzalloc(sizeof(*request), GFP_KERNEL);
1619 if (request == NULL)
1622 seqno = ring->add_request(dev, ring, file_priv, flush_domains);
1624 request->seqno = seqno;
1625 request->ring = ring;
1626 request->emitted_jiffies = jiffies;
1627 was_empty = list_empty(&ring->request_list);
1628 list_add_tail(&request->list, &ring->request_list);
1630 if (i915_file_priv) {
1631 list_add_tail(&request->client_list,
1632 &i915_file_priv->mm.request_list);
1634 INIT_LIST_HEAD(&request->client_list);
1637 /* Associate any objects on the flushing list matching the write
1638 * domain we're flushing with our flush.
1640 if (flush_domains != 0)
1641 i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
1643 if (!dev_priv->mm.suspended) {
1644 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
1646 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1652 * Command execution barrier
1654 * Ensures that all commands in the ring are finished
1655 * before signalling the CPU
1658 i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
1660 uint32_t flush_domains = 0;
1662 /* The sampler always gets flushed on i965 (sigh) */
1664 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1666 ring->flush(dev, ring,
1667 I915_GEM_DOMAIN_COMMAND, flush_domains);
1668 return flush_domains;
1672 * Moves buffers associated only with the given active seqno from the active
1673 * to inactive list, potentially freeing them.
1676 i915_gem_retire_request(struct drm_device *dev,
1677 struct drm_i915_gem_request *request)
1679 drm_i915_private_t *dev_priv = dev->dev_private;
1681 trace_i915_gem_request_retire(dev, request->seqno);
1683 /* Move any buffers on the active list that are no longer referenced
1684 * by the ringbuffer to the flushing/inactive lists as appropriate.
1686 spin_lock(&dev_priv->mm.active_list_lock);
1687 while (!list_empty(&request->ring->active_list)) {
1688 struct drm_gem_object *obj;
1689 struct drm_i915_gem_object *obj_priv;
1691 obj_priv = list_first_entry(&request->ring->active_list,
1692 struct drm_i915_gem_object,
1694 obj = &obj_priv->base;
1696 /* If the seqno being retired doesn't match the oldest in the
1697 * list, then the oldest in the list must still be newer than
1700 if (obj_priv->last_rendering_seqno != request->seqno)
1704 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1705 __func__, request->seqno, obj);
1708 if (obj->write_domain != 0)
1709 i915_gem_object_move_to_flushing(obj);
1711 /* Take a reference on the object so it won't be
1712 * freed while the spinlock is held. The list
1713 * protection for this spinlock is safe when breaking
1714 * the lock like this since the next thing we do
1715 * is just get the head of the list again.
1717 drm_gem_object_reference(obj);
1718 i915_gem_object_move_to_inactive(obj);
1719 spin_unlock(&dev_priv->mm.active_list_lock);
1720 drm_gem_object_unreference(obj);
1721 spin_lock(&dev_priv->mm.active_list_lock);
1725 spin_unlock(&dev_priv->mm.active_list_lock);
1729 * Returns true if seq1 is later than seq2.
1732 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1734 return (int32_t)(seq1 - seq2) >= 0;
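/* Doing the subtraction in unsigned arithmetic and then testing the sign
 * makes this safe across seqno wrap-around: with seq1 == 2 and
 * seq2 == 0xfffffffe the difference is 4, so seq1 is correctly reported
 * as the later sequence number.
 */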
1738 i915_get_gem_seqno(struct drm_device *dev,
1739 struct intel_ring_buffer *ring)
1741 return ring->get_gem_seqno(dev, ring);
1745 * This function clears the request list as sequence numbers are passed.
1748 i915_gem_retire_requests_ring(struct drm_device *dev,
1749 struct intel_ring_buffer *ring)
1751 drm_i915_private_t *dev_priv = dev->dev_private;
1754 if (!ring->status_page.page_addr
1755 || list_empty(&ring->request_list))
1758 seqno = i915_get_gem_seqno(dev, ring);
1760 while (!list_empty(&ring->request_list)) {
1761 struct drm_i915_gem_request *request;
1762 uint32_t retiring_seqno;
1764 request = list_first_entry(&ring->request_list,
1765 struct drm_i915_gem_request,
1767 retiring_seqno = request->seqno;
1769 if (i915_seqno_passed(seqno, retiring_seqno) ||
1770 atomic_read(&dev_priv->mm.wedged)) {
1771 i915_gem_retire_request(dev, request);
1773 list_del(&request->list);
1774 list_del(&request->client_list);
1780 if (unlikely (dev_priv->trace_irq_seqno &&
1781 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
1783 ring->user_irq_put(dev, ring);
1784 dev_priv->trace_irq_seqno = 0;
1789 i915_gem_retire_requests(struct drm_device *dev)
1791 drm_i915_private_t *dev_priv = dev->dev_private;
1793 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1794 struct drm_i915_gem_object *obj_priv, *tmp;
1796 /* We must be careful that during unbind() we do not
1797 * accidentally infinitely recurse into retire requests.
1799 * retire -> free -> unbind -> wait -> retire_ring
1801 list_for_each_entry_safe(obj_priv, tmp,
1802 &dev_priv->mm.deferred_free_list,
1804 i915_gem_free_object_tail(&obj_priv->base);
1807 i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
1809 i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
1813 i915_gem_retire_work_handler(struct work_struct *work)
1815 drm_i915_private_t *dev_priv;
1816 struct drm_device *dev;
1818 dev_priv = container_of(work, drm_i915_private_t,
1819 mm.retire_work.work);
1820 dev = dev_priv->dev;
1822 mutex_lock(&dev->struct_mutex);
1823 i915_gem_retire_requests(dev);
1825 if (!dev_priv->mm.suspended &&
1826 (!list_empty(&dev_priv->render_ring.request_list) ||
1828 !list_empty(&dev_priv->bsd_ring.request_list))))
1829 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1830 mutex_unlock(&dev->struct_mutex);
1834 i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
1835 int interruptible, struct intel_ring_buffer *ring)
1837 drm_i915_private_t *dev_priv = dev->dev_private;
1843 if (atomic_read(&dev_priv->mm.wedged))
1846 if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
1847 if (HAS_PCH_SPLIT(dev))
1848 ier = I915_READ(DEIER) | I915_READ(GTIER);
1850 ier = I915_READ(IER);
1852 DRM_ERROR("something (likely vbetool) disabled "
1853 "interrupts, re-enabling\n");
1854 i915_driver_irq_preinstall(dev);
1855 i915_driver_irq_postinstall(dev);
1858 trace_i915_gem_request_wait_begin(dev, seqno);
1860 ring->waiting_gem_seqno = seqno;
1861 ring->user_irq_get(dev, ring);
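/* With the ring's user interrupt enabled we sleep until the seqno has
 * passed or the GPU is declared wedged; in the interruptible case a signal
 * aborts the wait and the ioctl is restarted via -ERESTARTSYS.
 */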
1863 ret = wait_event_interruptible(ring->irq_queue,
1865 ring->get_gem_seqno(dev, ring), seqno)
1866 || atomic_read(&dev_priv->mm.wedged));
1868 wait_event(ring->irq_queue,
1870 ring->get_gem_seqno(dev, ring), seqno)
1871 || atomic_read(&dev_priv->mm.wedged));
1873 ring->user_irq_put(dev, ring);
1874 ring->waiting_gem_seqno = 0;
1876 trace_i915_gem_request_wait_end(dev, seqno);
1878 if (atomic_read(&dev_priv->mm.wedged))
1881 if (ret && ret != -ERESTARTSYS)
1882 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1883 __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
1885 /* Directly dispatch request retiring. While we have the work queue
1886 * to handle this, the waiter on a request often wants an associated
1887 * buffer to have made it to the inactive list, and we would need
1888 * a separate wait queue to handle that.
1891 i915_gem_retire_requests_ring(dev, ring);
1897 * Waits for a sequence number to be signaled, and cleans up the
1898 * request and object lists appropriately for that event.
1901 i915_wait_request(struct drm_device *dev, uint32_t seqno,
1902 struct intel_ring_buffer *ring)
1904 return i915_do_wait_request(dev, seqno, 1, ring);
1908 i915_gem_flush(struct drm_device *dev,
1909 uint32_t invalidate_domains,
1910 uint32_t flush_domains)
1912 drm_i915_private_t *dev_priv = dev->dev_private;
1913 if (flush_domains & I915_GEM_DOMAIN_CPU)
1914 drm_agp_chipset_flush(dev);
1915 dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
1920 dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
1926 * Ensures that all rendering to the object has completed and the object is
1927 * safe to unbind from the GTT or access from the CPU.
1930 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1932 struct drm_device *dev = obj->dev;
1933 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1936 /* This function only exists to support waiting for existing rendering,
1937 * not for emitting required flushes.
1939 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
/* If there is rendering queued on the buffer being evicted, wait for it. */
1944 if (obj_priv->active) {
1946 DRM_INFO("%s: object %p wait for seqno %08x\n",
1947 __func__, obj, obj_priv->last_rendering_seqno);
1949 ret = i915_wait_request(dev,
1950 obj_priv->last_rendering_seqno, obj_priv->ring);
1959 * Unbinds an object from the GTT aperture.
1962 i915_gem_object_unbind(struct drm_gem_object *obj)
1964 struct drm_device *dev = obj->dev;
1965 drm_i915_private_t *dev_priv = dev->dev_private;
1966 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1970 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1971 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1973 if (obj_priv->gtt_space == NULL)
1976 if (obj_priv->pin_count != 0) {
1977 DRM_ERROR("Attempting to unbind pinned buffer\n");
1981 /* blow away mappings if mapped through GTT */
1982 i915_gem_release_mmap(obj);
1984 /* Move the object to the CPU domain to ensure that
1985 * any possible CPU writes while it's not in the GTT
1986 * are flushed when we go to remap it. This will
* also ensure that all pending GPU writes are finished before we unbind.
*/
1990 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1991 if (ret == -ERESTARTSYS)
1993 /* Continue on if we fail due to EIO, the GPU is hung so we
1994 * should be safe and we need to cleanup or else we might
1995 * cause memory corruption through use-after-free.
1998 /* release the fence reg _after_ flushing */
1999 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2000 i915_gem_clear_fence_reg(obj);
2002 if (obj_priv->agp_mem != NULL) {
2003 drm_unbind_agp(obj_priv->agp_mem);
2004 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
2005 obj_priv->agp_mem = NULL;
2008 i915_gem_object_put_pages(obj);
2009 BUG_ON(obj_priv->pages_refcount);
2011 if (obj_priv->gtt_space) {
2012 atomic_dec(&dev->gtt_count);
2013 atomic_sub(obj->size, &dev->gtt_memory);
2015 drm_mm_put_block(obj_priv->gtt_space);
2016 obj_priv->gtt_space = NULL;
2019 /* Remove ourselves from the LRU list if present. */
2020 spin_lock(&dev_priv->mm.active_list_lock);
2021 if (!list_empty(&obj_priv->list))
2022 list_del_init(&obj_priv->list);
2023 spin_unlock(&dev_priv->mm.active_list_lock);
2025 if (i915_gem_object_is_purgeable(obj_priv))
2026 i915_gem_object_truncate(obj);
2028 trace_i915_gem_object_unbind(obj);
2034 i915_gpu_idle(struct drm_device *dev)
2036 drm_i915_private_t *dev_priv = dev->dev_private;
2038 uint32_t seqno1, seqno2;
2041 spin_lock(&dev_priv->mm.active_list_lock);
2042 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2043 list_empty(&dev_priv->render_ring.active_list) &&
2045 list_empty(&dev_priv->bsd_ring.active_list)));
2046 spin_unlock(&dev_priv->mm.active_list_lock);
2051 /* Flush everything onto the inactive list. */
2052 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2053 seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
2054 &dev_priv->render_ring);
2057 ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
2060 seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
2061 &dev_priv->bsd_ring);
2065 ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
2075 i915_gem_object_get_pages(struct drm_gem_object *obj,
2078 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2080 struct address_space *mapping;
2081 struct inode *inode;
2084 BUG_ON(obj_priv->pages_refcount
2085 == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
2087 if (obj_priv->pages_refcount++ != 0)
2090 /* Get the list of pages out of our struct file. They'll be pinned
2091 * at this point until we release them.
2093 page_count = obj->size / PAGE_SIZE;
2094 BUG_ON(obj_priv->pages != NULL);
2095 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
2096 if (obj_priv->pages == NULL) {
2097 obj_priv->pages_refcount--;
2101 inode = obj->filp->f_path.dentry->d_inode;
2102 mapping = inode->i_mapping;
2103 for (i = 0; i < page_count; i++) {
2104 page = read_cache_page_gfp(mapping, i,
2112 obj_priv->pages[i] = page;
2115 if (obj_priv->tiling_mode != I915_TILING_NONE)
2116 i915_gem_object_do_bit_17_swizzle(obj);
2122 page_cache_release(obj_priv->pages[i]);
2124 drm_free_large(obj_priv->pages);
2125 obj_priv->pages = NULL;
2126 obj_priv->pages_refcount--;
2127 return PTR_ERR(page);
2130 static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
2132 struct drm_gem_object *obj = reg->obj;
2133 struct drm_device *dev = obj->dev;
2134 drm_i915_private_t *dev_priv = dev->dev_private;
2135 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2136 int regnum = obj_priv->fence_reg;
2139 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2141 val |= obj_priv->gtt_offset & 0xfffff000;
2142 val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
2143 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2145 if (obj_priv->tiling_mode == I915_TILING_Y)
2146 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2147 val |= I965_FENCE_REG_VALID;
2149 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
2152 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2154 struct drm_gem_object *obj = reg->obj;
2155 struct drm_device *dev = obj->dev;
2156 drm_i915_private_t *dev_priv = dev->dev_private;
2157 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2158 int regnum = obj_priv->fence_reg;
2161 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2163 val |= obj_priv->gtt_offset & 0xfffff000;
2164 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2165 if (obj_priv->tiling_mode == I915_TILING_Y)
2166 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2167 val |= I965_FENCE_REG_VALID;
2169 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2172 static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2174 struct drm_gem_object *obj = reg->obj;
2175 struct drm_device *dev = obj->dev;
2176 drm_i915_private_t *dev_priv = dev->dev_private;
2177 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2178 int regnum = obj_priv->fence_reg;
2180 uint32_t fence_reg, val;
2183 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2184 (obj_priv->gtt_offset & (obj->size - 1))) {
2185 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
2186 __func__, obj_priv->gtt_offset, obj->size);
2190 if (obj_priv->tiling_mode == I915_TILING_Y &&
2191 HAS_128_BYTE_Y_TILING(dev))
2196 /* Note: pitch better be a power of two tile widths */
2197 pitch_val = obj_priv->stride / tile_width;
2198 pitch_val = ffs(pitch_val) - 1;
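/* The fence register wants the pitch encoded as the log2 of the pitch in
 * tile widths; because the pitch is a power-of-two number of tiles,
 * ffs() - 1 yields exactly that exponent.
 */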
2200 if (obj_priv->tiling_mode == I915_TILING_Y &&
2201 HAS_128_BYTE_Y_TILING(dev))
2202 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2204 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2206 val = obj_priv->gtt_offset;
2207 if (obj_priv->tiling_mode == I915_TILING_Y)
2208 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2209 val |= I915_FENCE_SIZE_BITS(obj->size);
2210 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2211 val |= I830_FENCE_REG_VALID;
2214 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2216 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2217 I915_WRITE(fence_reg, val);
2220 static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2222 struct drm_gem_object *obj = reg->obj;
2223 struct drm_device *dev = obj->dev;
2224 drm_i915_private_t *dev_priv = dev->dev_private;
2225 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2226 int regnum = obj_priv->fence_reg;
2229 uint32_t fence_size_bits;
2231 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
2232 (obj_priv->gtt_offset & (obj->size - 1))) {
2233 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
2234 __func__, obj_priv->gtt_offset);
2238 pitch_val = obj_priv->stride / 128;
2239 pitch_val = ffs(pitch_val) - 1;
2240 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2242 val = obj_priv->gtt_offset;
2243 if (obj_priv->tiling_mode == I915_TILING_Y)
2244 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2245 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2246 WARN_ON(fence_size_bits & ~0x00000f00);
2247 val |= fence_size_bits;
2248 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2249 val |= I830_FENCE_REG_VALID;
2251 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2254 static int i915_find_fence_reg(struct drm_device *dev)
2256 struct drm_i915_fence_reg *reg = NULL;
2257 struct drm_i915_gem_object *obj_priv = NULL;
2258 struct drm_i915_private *dev_priv = dev->dev_private;
2259 struct drm_gem_object *obj = NULL;
2262 /* First try to find a free reg */
2264 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2265 reg = &dev_priv->fence_regs[i];
2269 obj_priv = to_intel_bo(reg->obj);
2270 if (!obj_priv->pin_count)
2277 /* None available, try to steal one or wait for a user to finish */
2278 i = I915_FENCE_REG_NONE;
2279 list_for_each_entry(reg, &dev_priv->mm.fence_list,
2282 obj_priv = to_intel_bo(obj);
2284 if (obj_priv->pin_count)
2288 i = obj_priv->fence_reg;
2292 BUG_ON(i == I915_FENCE_REG_NONE);
2294 /* We only have a reference on obj from the active list. put_fence_reg
2295 * might drop that one, causing a use-after-free in it. So hold a
2296 * private reference to obj like the other callers of put_fence_reg
2297 * (set_tiling ioctl) do. */
2298 drm_gem_object_reference(obj);
2299 ret = i915_gem_object_put_fence_reg(obj);
2300 drm_gem_object_unreference(obj);
2308 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2309 * @obj: object to map through a fence reg
2311 * When mapping objects through the GTT, userspace wants to be able to write
2312 * to them without having to worry about swizzling if the object is tiled.
2314 * This function walks the fence regs looking for a free one for @obj,
2315 * stealing one if it can't find any.
2317 * It then sets up the reg based on the object's properties: address, pitch
2318 * and tiling format.
2321 i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2323 struct drm_device *dev = obj->dev;
2324 struct drm_i915_private *dev_priv = dev->dev_private;
2325 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2326 struct drm_i915_fence_reg *reg = NULL;
2329 /* Just update our place in the LRU if our fence is getting used. */
2330 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
2331 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2332 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2336 switch (obj_priv->tiling_mode) {
2337 case I915_TILING_NONE:
2338 WARN(1, "allocating a fence for non-tiled object?\n");
2341 if (!obj_priv->stride)
2343 WARN((obj_priv->stride & (512 - 1)),
2344 "object 0x%08x is X tiled but has non-512B pitch\n",
2345 obj_priv->gtt_offset);
2348 if (!obj_priv->stride)
2350 WARN((obj_priv->stride & (128 - 1)),
2351 "object 0x%08x is Y tiled but has non-128B pitch\n",
2352 obj_priv->gtt_offset);
2356 ret = i915_find_fence_reg(dev);
2360 obj_priv->fence_reg = ret;
2361 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2362 list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2366 switch (INTEL_INFO(dev)->gen) {
2368 sandybridge_write_fence_reg(reg);
2372 i965_write_fence_reg(reg);
2375 i915_write_fence_reg(reg);
2378 i830_write_fence_reg(reg);
2382 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
2383 obj_priv->tiling_mode);
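/*
 * Rough userspace-side sketch of the path that relies on this function
 * (libdrm assumed; everything outside this file is illustrative): a tiled
 * object mapped through the GTT is written with plain linear stores, and the
 * fence programmed here makes the hardware do the (de)tiling, so no manual
 * swizzling is needed.
 *
 *	struct drm_i915_gem_mmap_gtt map = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 *	memcpy(ptr, pixels, size);
 */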
2389 * i915_gem_clear_fence_reg - clear out fence register info
2390 * @obj: object to clear
2392 * Zeroes out the fence register itself and clears out the associated
2393 * data structures in dev_priv and obj_priv.
2396 i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2398 struct drm_device *dev = obj->dev;
2399 drm_i915_private_t *dev_priv = dev->dev_private;
2400 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2401 struct drm_i915_fence_reg *reg =
2402 &dev_priv->fence_regs[obj_priv->fence_reg];
2405 switch (INTEL_INFO(dev)->gen) {
2407 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2408 (obj_priv->fence_reg * 8), 0);
2412 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2415 if (obj_priv->fence_reg >= 8)
2416 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
2419 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2421 I915_WRITE(fence_reg, 0);
2426 obj_priv->fence_reg = I915_FENCE_REG_NONE;
2427 list_del_init(&reg->lru_list);
2431 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2432 * to the buffer to finish, and then resets the fence register.
2433 * @obj: tiled object holding a fence register.
2435 * Zeroes out the fence register itself and clears out the associated
2436 * data structures in dev_priv and obj_priv.
2439 i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2441 struct drm_device *dev = obj->dev;
2442 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2444 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2447 /* If we've changed tiling, GTT-mappings of the object
2448 * need to re-fault to ensure that the correct fence register
2449 * setup is in place.
2451 i915_gem_release_mmap(obj);
2453 /* On the i915, GPU access to tiled buffers is via a fence,
2454 * therefore we must wait for any outstanding access to complete
2455 * before clearing the fence.
2457 if (!IS_I965G(dev)) {
2460 ret = i915_gem_object_flush_gpu_write_domain(obj);
2464 ret = i915_gem_object_wait_rendering(obj);
2469 i915_gem_object_flush_gtt_write_domain(obj);
2470 i915_gem_clear_fence_reg(obj);
2476 * Finds free space in the GTT aperture and binds the object there.
2479 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2481 struct drm_device *dev = obj->dev;
2482 drm_i915_private_t *dev_priv = dev->dev_private;
2483 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2484 struct drm_mm_node *free_space;
2485 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2488 if (obj_priv->madv != I915_MADV_WILLNEED) {
2489 DRM_ERROR("Attempting to bind a purgeable object\n");
2494 alignment = i915_gem_get_gtt_alignment(obj);
2495 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
2496 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2500 /* If the object is bigger than the entire aperture, reject it early
2501 * before evicting everything in a vain attempt to find space.
2503 if (obj->size > dev->gtt_total) {
2504 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2509 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2510 obj->size, alignment, 0);
2511 if (free_space != NULL) {
2512 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2514 if (obj_priv->gtt_space != NULL)
2515 obj_priv->gtt_offset = obj_priv->gtt_space->start;
2517 if (obj_priv->gtt_space == NULL) {
2518 /* If the gtt is empty and we're still having trouble
2519 * fitting our object in, we're out of memory.
2522 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2524 ret = i915_gem_evict_something(dev, obj->size, alignment);
2532 DRM_INFO("Binding object of size %zd at 0x%08x\n",
2533 obj->size, obj_priv->gtt_offset);
2535 ret = i915_gem_object_get_pages(obj, gfpmask);
2537 drm_mm_put_block(obj_priv->gtt_space);
2538 obj_priv->gtt_space = NULL;
2540 if (ret == -ENOMEM) {
2541 /* first try to clear up some space from the GTT */
2542 ret = i915_gem_evict_something(dev, obj->size,
2545 /* now try to shrink everyone else */
2560 /* Create an AGP memory structure pointing at our pages, and bind it
2563 obj_priv->agp_mem = drm_agp_bind_pages(dev,
2565 obj->size >> PAGE_SHIFT,
2566 obj_priv->gtt_offset,
2567 obj_priv->agp_type);
2568 if (obj_priv->agp_mem == NULL) {
2569 i915_gem_object_put_pages(obj);
2570 drm_mm_put_block(obj_priv->gtt_space);
2571 obj_priv->gtt_space = NULL;
2573 ret = i915_gem_evict_something(dev, obj->size, alignment);
2579 atomic_inc(&dev->gtt_count);
2580 atomic_add(obj->size, &dev->gtt_memory);
2582 /* keep track of the bound object by adding it to the inactive list */
2583 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
2585 /* Assert that the object is not currently in any GPU domain. As it
2586 * wasn't in the GTT, there shouldn't be any way it could have been in
2589 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2590 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2592 trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
2598 i915_gem_clflush_object(struct drm_gem_object *obj)
2600 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2602 /* If we don't have a page list set up, then we're not pinned
2603 * to GPU, and we can ignore the cache flush because it'll happen
2604 * again at bind time.
2606 if (obj_priv->pages == NULL)
2609 trace_i915_gem_object_clflush(obj);
2611 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
2614 /** Flushes any GPU write domain for the object if it's dirty. */
2616 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2618 struct drm_device *dev = obj->dev;
2619 uint32_t old_write_domain;
2620 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2622 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2625 /* Queue the GPU write cache flushing we need. */
2626 old_write_domain = obj->write_domain;
2627 i915_gem_flush(dev, 0, obj->write_domain);
2628 if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
2631 trace_i915_gem_object_change_domain(obj,
2637 /** Flushes the GTT write domain for the object if it's dirty. */
2639 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2641 uint32_t old_write_domain;
2643 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2646 /* No actual flushing is required for the GTT write domain. Writes
2647 * to it immediately go to main memory as far as we know, so there's
2648 * no chipset flush. It also doesn't land in render cache.
2650 old_write_domain = obj->write_domain;
2651 obj->write_domain = 0;
2653 trace_i915_gem_object_change_domain(obj,
2658 /** Flushes the CPU write domain for the object if it's dirty. */
2660 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2662 struct drm_device *dev = obj->dev;
2663 uint32_t old_write_domain;
2665 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2668 i915_gem_clflush_object(obj);
2669 drm_agp_chipset_flush(dev);
2670 old_write_domain = obj->write_domain;
2671 obj->write_domain = 0;
2673 trace_i915_gem_object_change_domain(obj,
2679 i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
2683 switch (obj->write_domain) {
2684 case I915_GEM_DOMAIN_GTT:
2685 i915_gem_object_flush_gtt_write_domain(obj);
2687 case I915_GEM_DOMAIN_CPU:
2688 i915_gem_object_flush_cpu_write_domain(obj);
2691 ret = i915_gem_object_flush_gpu_write_domain(obj);
2699 * Moves a single object to the GTT read, and possibly write domain.
2701 * This function returns when the move is complete, including waiting on
2705 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2707 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2708 uint32_t old_write_domain, old_read_domains;
2711 /* Not valid to be called on unbound objects. */
2712 if (obj_priv->gtt_space == NULL)
2715 ret = i915_gem_object_flush_gpu_write_domain(obj);
2719 /* Wait on any GPU rendering and flushing to occur. */
2720 ret = i915_gem_object_wait_rendering(obj);
2724 old_write_domain = obj->write_domain;
2725 old_read_domains = obj->read_domains;
2727 /* If we're writing through the GTT domain, then CPU and GPU caches
2728 * will need to be invalidated at next use.
2731 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2733 i915_gem_object_flush_cpu_write_domain(obj);
2735 /* It should now be out of any other write domains, and we can update
2736 * the domain values for our changes.
2738 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2739 obj->read_domains |= I915_GEM_DOMAIN_GTT;
2741 obj->write_domain = I915_GEM_DOMAIN_GTT;
2742 obj_priv->dirty = 1;
2745 trace_i915_gem_object_change_domain(obj,
2753 * Prepare buffer for display plane. Use uninterruptible for possible flush
2754 * wait, as in modesetting process we're not supposed to be interrupted.
2757 i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2759 struct drm_device *dev = obj->dev;
2760 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2761 uint32_t old_write_domain, old_read_domains;
2764 /* Not valid to be called on unbound objects. */
2765 if (obj_priv->gtt_space == NULL)
2768 ret = i915_gem_object_flush_gpu_write_domain(obj);
2772 /* Wait on any GPU rendering and flushing to occur. */
2773 if (obj_priv->active) {
2775 DRM_INFO("%s: object %p wait for seqno %08x\n",
2776 __func__, obj, obj_priv->last_rendering_seqno);
2778 ret = i915_do_wait_request(dev,
2779 obj_priv->last_rendering_seqno,
2786 i915_gem_object_flush_cpu_write_domain(obj);
2788 old_write_domain = obj->write_domain;
2789 old_read_domains = obj->read_domains;
2791 /* It should now be out of any other write domains, and we can update
2792 * the domain values for our changes.
2794 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2795 obj->read_domains = I915_GEM_DOMAIN_GTT;
2796 obj->write_domain = I915_GEM_DOMAIN_GTT;
2797 obj_priv->dirty = 1;
2799 trace_i915_gem_object_change_domain(obj,
2807 * Moves a single object to the CPU read, and possibly write domain.
2809 * This function returns when the move is complete, including waiting on
2813 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2815 uint32_t old_write_domain, old_read_domains;
2818 ret = i915_gem_object_flush_gpu_write_domain(obj);
2822 /* Wait on any GPU rendering and flushing to occur. */
2823 ret = i915_gem_object_wait_rendering(obj);
2827 i915_gem_object_flush_gtt_write_domain(obj);
2829 /* If we have a partially-valid cache of the object in the CPU,
2830 * finish invalidating it and free the per-page flags.
2832 i915_gem_object_set_to_full_cpu_read_domain(obj);
2834 old_write_domain = obj->write_domain;
2835 old_read_domains = obj->read_domains;
2837 /* Flush the CPU cache if it's still invalid. */
2838 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2839 i915_gem_clflush_object(obj);
2841 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2844 /* It should now be out of any other write domains, and we can update
2845 * the domain values for our changes.
2847 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2849 /* If we're writing through the CPU, then the GPU read domains will
2850 * need to be invalidated at next use.
2853 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2854 obj->write_domain = I915_GEM_DOMAIN_CPU;
2857 trace_i915_gem_object_change_domain(obj,
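/*
 * The set_to_gtt/cpu helpers above are what the set-domain ioctl ends up
 * calling.  A userspace sketch (libdrm assumed, illustration only) of moving
 * a buffer into the GTT domain for writing:
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 *
 * Using I915_GEM_DOMAIN_CPU instead lands in
 * i915_gem_object_set_to_cpu_domain() above.
 */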
2865 * Set the next domain for the specified object. This
2866 * may not actually perform the necessary flushing/invalidating though,
2867 * as that may want to be batched with other set_domain operations
2869 * This is (we hope) the only really tricky part of gem. The goal
2870 * is fairly simple -- track which caches hold bits of the object
2871 * and make sure they remain coherent. A few concrete examples may
2872 * help to explain how it works. For shorthand, we use the notation
2873 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2874 * a pair of read and write domain masks.
2876 * Case 1: the batch buffer
2882 * 5. Unmapped from GTT
2885 * Let's take these a step at a time
2888 * Pages allocated from the kernel may still have
2889 * cache contents, so we set them to (CPU, CPU) always.
2890 * 2. Written by CPU (using pwrite)
2891 * The pwrite function calls set_domain (CPU, CPU) and
2892 * this function does nothing (as nothing changes)
2894 * This function asserts that the object is not
2895 * currently in any GPU-based read or write domains
2897 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2898 * As write_domain is zero, this function adds in the
2899 * current read domains (CPU+COMMAND, 0).
2900 * flush_domains is set to CPU.
2901 * invalidate_domains is set to COMMAND
2902 * clflush is run to get data out of the CPU caches
2903 * then i915_dev_set_domain calls i915_gem_flush to
2904 * emit an MI_FLUSH and drm_agp_chipset_flush
2905 * 5. Unmapped from GTT
2906 * i915_gem_object_unbind calls set_domain (CPU, CPU)
2907 * flush_domains and invalidate_domains end up both zero
2908 * so no flushing/invalidating happens
2912 * Case 2: The shared render buffer
2916 * 3. Read/written by GPU
2917 * 4. set_domain to (CPU,CPU)
2918 * 5. Read/written by CPU
2919 * 6. Read/written by GPU
2922 * Same as last example, (CPU, CPU)
2924 * Nothing changes (assertions find that it is not in the GPU)
2925 * 3. Read/written by GPU
2926 * execbuffer calls set_domain (RENDER, RENDER)
2927 * flush_domains gets CPU
2928 * invalidate_domains gets GPU
2930 * MI_FLUSH and drm_agp_chipset_flush
2931 * 4. set_domain (CPU, CPU)
2932 * flush_domains gets GPU
2933 * invalidate_domains gets CPU
2934 * wait_rendering (obj) to make sure all drawing is complete.
2935 * This will include an MI_FLUSH to get the data from GPU
2937 * clflush (obj) to invalidate the CPU cache
2938 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2939 * 5. Read/written by CPU
2940 * cache lines are loaded and dirtied
2941 * 6. Read/written by GPU
2942 * Same as last GPU access
2944 * Case 3: The constant buffer
2949 * 4. Updated (written) by CPU again
2958 * flush_domains = CPU
2959 * invalidate_domains = RENDER
2962 * drm_agp_chipset_flush
2963 * 4. Updated (written) by CPU again
2965 * flush_domains = 0 (no previous write domain)
2966 * invalidate_domains = 0 (no new read domains)
2969 * flush_domains = CPU
2970 * invalidate_domains = RENDER
2973 * drm_agp_chipset_flush
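/*
 * Concrete reading of the (read_domains, write_domain) notation (illustration
 * only, mirroring what the function below computes): when the batch buffer of
 * Case 1 is handed to the GPU, set_domain (COMMAND, 0) leaves it as
 *
 *	obj->read_domains  = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_COMMAND;
 *	obj->write_domain  = 0;
 *	flush_domains      = I915_GEM_DOMAIN_CPU;       old write domain
 *	invalidate_domains = I915_GEM_DOMAIN_COMMAND;   newly added read domain
 *
 * i.e. flush what the CPU wrote, invalidate the command streamer's stale view.
 */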
2976 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2978 struct drm_device *dev = obj->dev;
2979 drm_i915_private_t *dev_priv = dev->dev_private;
2980 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2981 uint32_t invalidate_domains = 0;
2982 uint32_t flush_domains = 0;
2983 uint32_t old_read_domains;
2985 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2986 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
2988 intel_mark_busy(dev, obj);
2991 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2993 obj->read_domains, obj->pending_read_domains,
2994 obj->write_domain, obj->pending_write_domain);
2997 * If the object isn't moving to a new write domain,
2998 * let the object stay in multiple read domains
3000 if (obj->pending_write_domain == 0)
3001 obj->pending_read_domains |= obj->read_domains;
3003 obj_priv->dirty = 1;
3006 * Flush the current write domain if
3007 * the new read domains don't match. Invalidate
3008 * any read domains which differ from the old
3011 if (obj->write_domain &&
3012 obj->write_domain != obj->pending_read_domains) {
3013 flush_domains |= obj->write_domain;
3014 invalidate_domains |=
3015 obj->pending_read_domains & ~obj->write_domain;
3018 * Invalidate any read caches which may have
3019 * stale data. That is, any new read domains.
3021 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
3022 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
3024 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
3025 __func__, flush_domains, invalidate_domains);
3027 i915_gem_clflush_object(obj);
3030 old_read_domains = obj->read_domains;
3032 /* The actual obj->write_domain will be updated with
3033 * pending_write_domain after we emit the accumulated flush for all
3034 * of our domain changes in execbuffers (which clears objects'
3035 * write_domains). So if we have a current write domain that we
3036 * aren't changing, set pending_write_domain to that.
3038 if (flush_domains == 0 && obj->pending_write_domain == 0)
3039 obj->pending_write_domain = obj->write_domain;
3040 obj->read_domains = obj->pending_read_domains;
3042 if (flush_domains & I915_GEM_GPU_DOMAINS) {
3043 if (obj_priv->ring == &dev_priv->render_ring)
3044 dev_priv->flush_rings |= FLUSH_RENDER_RING;
3045 else if (obj_priv->ring == &dev_priv->bsd_ring)
3046 dev_priv->flush_rings |= FLUSH_BSD_RING;
3049 dev->invalidate_domains |= invalidate_domains;
3050 dev->flush_domains |= flush_domains;
3052 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
3054 obj->read_domains, obj->write_domain,
3055 dev->invalidate_domains, dev->flush_domains);
3058 trace_i915_gem_object_change_domain(obj,
3064 * Moves the object from a partially valid CPU read domain to a fully valid one.
3066 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3067 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3070 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3072 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3074 if (!obj_priv->page_cpu_valid)
3077 /* If we're partially in the CPU read domain, finish moving it in.
3079 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3082 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3083 if (obj_priv->page_cpu_valid[i])
3085 drm_clflush_pages(obj_priv->pages + i, 1);
3089 /* Free the page_cpu_valid mappings which are now stale, whether
3090 * or not we've got I915_GEM_DOMAIN_CPU.
3092 kfree(obj_priv->page_cpu_valid);
3093 obj_priv->page_cpu_valid = NULL;
3097 * Set the CPU read domain on a range of the object.
3099 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3100 * not entirely valid. The page_cpu_valid member of the object flags which
3101 * pages have been flushed, and will be respected by
3102 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3103 * of the whole object.
3105 * This function returns when the move is complete, including waiting on
3109 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3110 uint64_t offset, uint64_t size)
3112 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3113 uint32_t old_read_domains;
3116 if (offset == 0 && size == obj->size)
3117 return i915_gem_object_set_to_cpu_domain(obj, 0);
3119 ret = i915_gem_object_flush_gpu_write_domain(obj);
3123 /* Wait on any GPU rendering and flushing to occur. */
3124 ret = i915_gem_object_wait_rendering(obj);
3127 i915_gem_object_flush_gtt_write_domain(obj);
3129 /* If we're already fully in the CPU read domain, we're done. */
3130 if (obj_priv->page_cpu_valid == NULL &&
3131 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
3134 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3135 * newly adding I915_GEM_DOMAIN_CPU
3137 if (obj_priv->page_cpu_valid == NULL) {
3138 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3140 if (obj_priv->page_cpu_valid == NULL)
3142 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3143 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
3145 /* Flush the cache on any pages that are still invalid from the CPU's
3148 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3150 if (obj_priv->page_cpu_valid[i])
3153 drm_clflush_pages(obj_priv->pages + i, 1);
3155 obj_priv->page_cpu_valid[i] = 1;
3158 /* It should now be out of any other write domains, and we can update
3159 * the domain values for our changes.
3161 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3163 old_read_domains = obj->read_domains;
3164 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3166 trace_i915_gem_object_change_domain(obj,
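/*
 * Example of the partial path above (descriptive only): a pread of 4096 bytes
 * at offset 8192 from a bound 64KiB object clflushes just page 2, sets
 * page_cpu_valid[2] = 1 and adds I915_GEM_DOMAIN_CPU to read_domains without
 * touching the other fifteen pages; a later full set_to_cpu_domain() then only
 * has to flush the pages that are still marked invalid.
 */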
3174 * Pin an object to the GTT and evaluate the relocations landing in it.
3177 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3178 struct drm_file *file_priv,
3179 struct drm_i915_gem_exec_object2 *entry,
3180 struct drm_i915_gem_relocation_entry *relocs)
3182 struct drm_device *dev = obj->dev;
3183 drm_i915_private_t *dev_priv = dev->dev_private;
3184 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3186 void __iomem *reloc_page;
3189 need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3190 obj_priv->tiling_mode != I915_TILING_NONE;
3192 /* Check fence reg constraints and rebind if necessary */
3194 !i915_gem_object_fence_offset_ok(obj,
3195 obj_priv->tiling_mode)) {
3196 ret = i915_gem_object_unbind(obj);
3201 /* Choose the GTT offset for our buffer and put it there. */
3202 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3207 * Pre-965 chips need a fence register set up in order to
3208 * properly handle blits to/from tiled surfaces.
3211 ret = i915_gem_object_get_fence_reg(obj);
3213 i915_gem_object_unpin(obj);
3218 entry->offset = obj_priv->gtt_offset;
3220 /* Apply the relocations, using the GTT aperture to avoid cache
3221 * flushing requirements.
3223 for (i = 0; i < entry->relocation_count; i++) {
3224 struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
3225 struct drm_gem_object *target_obj;
3226 struct drm_i915_gem_object *target_obj_priv;
3227 uint32_t reloc_val, reloc_offset;
3228 uint32_t __iomem *reloc_entry;
3230 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
3231 reloc->target_handle);
3232 if (target_obj == NULL) {
3233 i915_gem_object_unpin(obj);
3236 target_obj_priv = to_intel_bo(target_obj);
3239 DRM_INFO("%s: obj %p offset %08x target %d "
3240 "read %08x write %08x gtt %08x "
3241 "presumed %08x delta %08x\n",
3244 (int) reloc->offset,
3245 (int) reloc->target_handle,
3246 (int) reloc->read_domains,
3247 (int) reloc->write_domain,
3248 (int) target_obj_priv->gtt_offset,
3249 (int) reloc->presumed_offset,
3253 /* The target buffer should have appeared before us in the
3254 * exec_object list, so it should have a GTT space bound by now.
3256 if (target_obj_priv->gtt_space == NULL) {
3257 DRM_ERROR("No GTT space found for object %d\n",
3258 reloc->target_handle);
3259 drm_gem_object_unreference(target_obj);
3260 i915_gem_object_unpin(obj);
3264 /* Validate that the target is in a valid r/w GPU domain */
3265 if (reloc->write_domain & (reloc->write_domain - 1)) {
3266 DRM_ERROR("reloc with multiple write domains: "
3267 "obj %p target %d offset %d "
3268 "read %08x write %08x",
3269 obj, reloc->target_handle,
3270 (int) reloc->offset,
3271 reloc->read_domains,
3272 reloc->write_domain);
3273 drm_gem_object_unreference(target_obj);
3274 i915_gem_object_unpin(obj);
3277 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3278 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3279 DRM_ERROR("reloc with read/write CPU domains: "
3280 "obj %p target %d offset %d "
3281 "read %08x write %08x",
3282 obj, reloc->target_handle,
3283 (int) reloc->offset,
3284 reloc->read_domains,
3285 reloc->write_domain);
3286 drm_gem_object_unreference(target_obj);
3287 i915_gem_object_unpin(obj);
3290 if (reloc->write_domain && target_obj->pending_write_domain &&
3291 reloc->write_domain != target_obj->pending_write_domain) {
3292 DRM_ERROR("Write domain conflict: "
3293 "obj %p target %d offset %d "
3294 "new %08x old %08x\n",
3295 obj, reloc->target_handle,
3296 (int) reloc->offset,
3297 reloc->write_domain,
3298 target_obj->pending_write_domain);
3299 drm_gem_object_unreference(target_obj);
3300 i915_gem_object_unpin(obj);
3304 target_obj->pending_read_domains |= reloc->read_domains;
3305 target_obj->pending_write_domain |= reloc->write_domain;
3307 /* If the relocation already has the right value in it, no
3308 * more work needs to be done.
3310 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
3311 drm_gem_object_unreference(target_obj);
3315 /* Check that the relocation address is valid... */
3316 if (reloc->offset > obj->size - 4) {
3317 DRM_ERROR("Relocation beyond object bounds: "
3318 "obj %p target %d offset %d size %d.\n",
3319 obj, reloc->target_handle,
3320 (int) reloc->offset, (int) obj->size);
3321 drm_gem_object_unreference(target_obj);
3322 i915_gem_object_unpin(obj);
3325 if (reloc->offset & 3) {
3326 DRM_ERROR("Relocation not 4-byte aligned: "
3327 "obj %p target %d offset %d.\n",
3328 obj, reloc->target_handle,
3329 (int) reloc->offset);
3330 drm_gem_object_unreference(target_obj);
3331 i915_gem_object_unpin(obj);
3335 /* and points to somewhere within the target object. */
3336 if (reloc->delta >= target_obj->size) {
3337 DRM_ERROR("Relocation beyond target object bounds: "
3338 "obj %p target %d delta %d size %d.\n",
3339 obj, reloc->target_handle,
3340 (int) reloc->delta, (int) target_obj->size);
3341 drm_gem_object_unreference(target_obj);
3342 i915_gem_object_unpin(obj);
3346 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3348 drm_gem_object_unreference(target_obj);
3349 i915_gem_object_unpin(obj);
3353 /* Map the page containing the relocation we're going to
3356 reloc_offset = obj_priv->gtt_offset + reloc->offset;
3357 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3361 reloc_entry = (uint32_t __iomem *)(reloc_page +
3362 (reloc_offset & (PAGE_SIZE - 1)));
3363 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
3366 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
3367 obj, (unsigned int) reloc->offset,
3368 readl(reloc_entry), reloc_val);
3370 writel(reloc_val, reloc_entry);
3371 io_mapping_unmap_atomic(reloc_page, KM_USER0);
3373 /* The updated presumed offset for this entry will be
3374 * copied back out to the user.
3376 reloc->presumed_offset = target_obj_priv->gtt_offset;
3378 drm_gem_object_unreference(target_obj);
3383 i915_gem_dump_object(obj, 128, __func__, ~0);
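/*
 * Userspace view of one relocation processed by the loop above (sketch; field
 * names are those of struct drm_i915_gem_relocation_entry, the values are
 * illustrative): "patch the 32-bit word at `offset` inside this buffer so it
 * points `delta` bytes into the buffer named by `target_handle`".
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle   = target_bo_handle,
 *		.offset          = 4 * dword_in_batch,
 *		.delta           = 0,
 *		.read_domains    = I915_GEM_DOMAIN_RENDER,
 *		.write_domain    = 0,
 *		.presumed_offset = last_known_gtt_offset,
 *	};
 *
 * If presumed_offset still matches the target's real GTT offset the kernel
 * skips the writel() entirely, which is the fast path checked above.
 */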
3388 /* Throttle our rendering by waiting until the ring has completed our requests
3389 * emitted over 20 msec ago.
3391 * Note that if we were to use the current jiffies each time around the loop,
3392 * we wouldn't escape the function with any frames outstanding if the time to
3393 * render a frame was over 20ms.
3395 * This should get us reasonable parallelism between CPU and GPU but also
3396 * relatively low latency when blocking on a particular request to finish.
3399 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3401 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3403 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3405 mutex_lock(&dev->struct_mutex);
3406 while (!list_empty(&i915_file_priv->mm.request_list)) {
3407 struct drm_i915_gem_request *request;
3409 request = list_first_entry(&i915_file_priv->mm.request_list,
3410 struct drm_i915_gem_request,
3413 if (time_after_eq(request->emitted_jiffies, recent_enough))
3416 ret = i915_wait_request(dev, request->seqno, request->ring);
3420 mutex_unlock(&dev->struct_mutex);
3426 i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
3427 uint32_t buffer_count,
3428 struct drm_i915_gem_relocation_entry **relocs)
3430 uint32_t reloc_count = 0, reloc_index = 0, i;
3434 for (i = 0; i < buffer_count; i++) {
3435 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3437 reloc_count += exec_list[i].relocation_count;
3440 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
3441 if (*relocs == NULL) {
3442 DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
3446 for (i = 0; i < buffer_count; i++) {
3447 struct drm_i915_gem_relocation_entry __user *user_relocs;
3449 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3451 ret = copy_from_user(&(*relocs)[reloc_index],
3453 exec_list[i].relocation_count *
3456 drm_free_large(*relocs);
3461 reloc_index += exec_list[i].relocation_count;
3468 i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
3469 uint32_t buffer_count,
3470 struct drm_i915_gem_relocation_entry *relocs)
3472 uint32_t reloc_count = 0, i;
3478 for (i = 0; i < buffer_count; i++) {
3479 struct drm_i915_gem_relocation_entry __user *user_relocs;
3482 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3484 unwritten = copy_to_user(user_relocs,
3485 &relocs[reloc_count],
3486 exec_list[i].relocation_count *
3494 reloc_count += exec_list[i].relocation_count;
3498 drm_free_large(relocs);
3504 i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
3505 uint64_t exec_offset)
3507 uint32_t exec_start, exec_len;
3509 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3510 exec_len = (uint32_t) exec->batch_len;
3512 if ((exec_start | exec_len) & 0x7)
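/*
 * Example of the check above: both the batch start and the batch length must
 * be 8-byte aligned, so an execbuffer with batch_start_offset = 4, or with a
 * 30-byte batch_len, is rejected before anything reaches the ring.
 */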
3522 i915_gem_wait_for_pending_flip(struct drm_device *dev,
3523 struct drm_gem_object **object_list,
3526 drm_i915_private_t *dev_priv = dev->dev_private;
3527 struct drm_i915_gem_object *obj_priv;
3532 prepare_to_wait(&dev_priv->pending_flip_queue,
3533 &wait, TASK_INTERRUPTIBLE);
3534 for (i = 0; i < count; i++) {
3535 obj_priv = to_intel_bo(object_list[i]);
3536 if (atomic_read(&obj_priv->pending_flip) > 0)
3542 if (!signal_pending(current)) {
3543 mutex_unlock(&dev->struct_mutex);
3545 mutex_lock(&dev->struct_mutex);
3551 finish_wait(&dev_priv->pending_flip_queue, &wait);
3558 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3559 struct drm_file *file_priv,
3560 struct drm_i915_gem_execbuffer2 *args,
3561 struct drm_i915_gem_exec_object2 *exec_list)
3563 drm_i915_private_t *dev_priv = dev->dev_private;
3564 struct drm_gem_object **object_list = NULL;
3565 struct drm_gem_object *batch_obj;
3566 struct drm_i915_gem_object *obj_priv;
3567 struct drm_clip_rect *cliprects = NULL;
3568 struct drm_i915_gem_relocation_entry *relocs = NULL;
3569 int ret = 0, ret2, i, pinned = 0;
3570 uint64_t exec_offset;
3571 uint32_t seqno, flush_domains, reloc_index;
3572 int pin_tries, flips;
3574 struct intel_ring_buffer *ring = NULL;
3577 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3578 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3580 if (args->flags & I915_EXEC_BSD) {
3581 if (!HAS_BSD(dev)) {
3582 DRM_ERROR("execbuf with wrong flag\n");
3585 ring = &dev_priv->bsd_ring;
3587 ring = &dev_priv->render_ring;
3590 if (args->buffer_count < 1) {
3591 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3594 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
3595 if (object_list == NULL) {
3596 DRM_ERROR("Failed to allocate object list for %d buffers\n",
3597 args->buffer_count);
3602 if (args->num_cliprects != 0) {
3603 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3605 if (cliprects == NULL) {
3610 ret = copy_from_user(cliprects,
3611 (struct drm_clip_rect __user *)
3612 (uintptr_t) args->cliprects_ptr,
3613 sizeof(*cliprects) * args->num_cliprects);
3615 DRM_ERROR("copy %d cliprects failed: %d\n",
3616 args->num_cliprects, ret);
3622 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3627 mutex_lock(&dev->struct_mutex);
3629 i915_verify_inactive(dev, __FILE__, __LINE__);
3631 if (atomic_read(&dev_priv->mm.wedged)) {
3632 mutex_unlock(&dev->struct_mutex);
3637 if (dev_priv->mm.suspended) {
3638 mutex_unlock(&dev->struct_mutex);
3643 /* Look up object handles */
3645 for (i = 0; i < args->buffer_count; i++) {
3646 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3647 exec_list[i].handle);
3648 if (object_list[i] == NULL) {
3649 DRM_ERROR("Invalid object handle %d at index %d\n",
3650 exec_list[i].handle, i);
3651 /* prevent error path from reading uninitialized data */
3652 args->buffer_count = i + 1;
3657 obj_priv = to_intel_bo(object_list[i]);
3658 if (obj_priv->in_execbuffer) {
3659 DRM_ERROR("Object %p appears more than once in object list\n",
3661 /* prevent error path from reading uninitialized data */
3662 args->buffer_count = i + 1;
3666 obj_priv->in_execbuffer = true;
3667 flips += atomic_read(&obj_priv->pending_flip);
3671 ret = i915_gem_wait_for_pending_flip(dev, object_list,
3672 args->buffer_count);
3677 /* Pin and relocate */
3678 for (pin_tries = 0; ; pin_tries++) {
3682 for (i = 0; i < args->buffer_count; i++) {
3683 object_list[i]->pending_read_domains = 0;
3684 object_list[i]->pending_write_domain = 0;
3685 ret = i915_gem_object_pin_and_relocate(object_list[i],
3688 &relocs[reloc_index]);
3692 reloc_index += exec_list[i].relocation_count;
3698 /* error other than GTT full, or we've already tried again */
3699 if (ret != -ENOSPC || pin_tries >= 1) {
3700 if (ret != -ERESTARTSYS) {
3701 unsigned long long total_size = 0;
3703 for (i = 0; i < args->buffer_count; i++) {
3704 obj_priv = to_intel_bo(object_list[i]);
3706 total_size += object_list[i]->size;
3708 exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
3709 obj_priv->tiling_mode != I915_TILING_NONE;
3711 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
3712 pinned+1, args->buffer_count,
3713 total_size, num_fences,
3715 DRM_ERROR("%d objects [%d pinned], "
3716 "%d object bytes [%d pinned], "
3717 "%d/%d gtt bytes\n",
3718 atomic_read(&dev->object_count),
3719 atomic_read(&dev->pin_count),
3720 atomic_read(&dev->object_memory),
3721 atomic_read(&dev->pin_memory),
3722 atomic_read(&dev->gtt_memory),
3728 /* unpin all of our buffers */
3729 for (i = 0; i < pinned; i++)
3730 i915_gem_object_unpin(object_list[i]);
3733 /* evict everyone we can from the aperture */
3734 ret = i915_gem_evict_everything(dev);
3735 if (ret && ret != -ENOSPC)
3739 /* Set the pending read domains for the batch buffer to COMMAND */
3740 batch_obj = object_list[args->buffer_count-1];
3741 if (batch_obj->pending_write_domain) {
3742 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3746 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
3748 /* Sanity check the batch buffer, prior to moving objects */
3749 exec_offset = exec_list[args->buffer_count - 1].offset;
3750 ret = i915_gem_check_execbuffer (args, exec_offset);
3752 DRM_ERROR("execbuf with invalid offset/length\n");
3756 i915_verify_inactive(dev, __FILE__, __LINE__);
3758 /* Zero the global flush/invalidate flags. These
3759 * will be modified as new domains are computed
3762 dev->invalidate_domains = 0;
3763 dev->flush_domains = 0;
3764 dev_priv->flush_rings = 0;
3766 for (i = 0; i < args->buffer_count; i++) {
3767 struct drm_gem_object *obj = object_list[i];
3769 /* Compute new gpu domains and update invalidate/flush */
3770 i915_gem_object_set_to_gpu_domain(obj);
3773 i915_verify_inactive(dev, __FILE__, __LINE__);
3775 if (dev->invalidate_domains | dev->flush_domains) {
3777 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3779 dev->invalidate_domains,
3780 dev->flush_domains);
3783 dev->invalidate_domains,
3784 dev->flush_domains);
3785 if (dev_priv->flush_rings & FLUSH_RENDER_RING)
3786 (void)i915_add_request(dev, file_priv,
3788 &dev_priv->render_ring);
3789 if (dev_priv->flush_rings & FLUSH_BSD_RING)
3790 (void)i915_add_request(dev, file_priv,
3792 &dev_priv->bsd_ring);
3795 for (i = 0; i < args->buffer_count; i++) {
3796 struct drm_gem_object *obj = object_list[i];
3797 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3798 uint32_t old_write_domain = obj->write_domain;
3800 obj->write_domain = obj->pending_write_domain;
3801 if (obj->write_domain)
3802 list_move_tail(&obj_priv->gpu_write_list,
3803 &dev_priv->mm.gpu_write_list);
3805 list_del_init(&obj_priv->gpu_write_list);
3807 trace_i915_gem_object_change_domain(obj,
3812 i915_verify_inactive(dev, __FILE__, __LINE__);
3815 for (i = 0; i < args->buffer_count; i++) {
3816 i915_gem_object_check_coherency(object_list[i],
3817 exec_list[i].handle);
3822 i915_gem_dump_object(batch_obj,
3828 /* Exec the batchbuffer */
3829 ret = ring->dispatch_gem_execbuffer(dev, ring, args,
3830 cliprects, exec_offset);
3832 DRM_ERROR("dispatch failed %d\n", ret);
3837 * Ensure that the commands in the batch buffer are
3838 * finished before the interrupt fires
3840 flush_domains = i915_retire_commands(dev, ring);
3842 i915_verify_inactive(dev, __FILE__, __LINE__);
3845 * Get a seqno representing the execution of the current buffer,
3846 * which we can wait on. We would like to mitigate these interrupts,
3847 * likely by only creating seqnos occasionally (so that we have
3848 * *some* interrupts representing completion of buffers that we can
3849 * wait on when trying to clear up gtt space).
3851 seqno = i915_add_request(dev, file_priv, flush_domains, ring);
3853 for (i = 0; i < args->buffer_count; i++) {
3854 struct drm_gem_object *obj = object_list[i];
3855 obj_priv = to_intel_bo(obj);
3857 i915_gem_object_move_to_active(obj, seqno, ring);
3859 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3863 i915_dump_lru(dev, __func__);
3866 i915_verify_inactive(dev, __FILE__, __LINE__);
3869 for (i = 0; i < pinned; i++)
3870 i915_gem_object_unpin(object_list[i]);
3872 for (i = 0; i < args->buffer_count; i++) {
3873 if (object_list[i]) {
3874 obj_priv = to_intel_bo(object_list[i]);
3875 obj_priv->in_execbuffer = false;
3877 drm_gem_object_unreference(object_list[i]);
3880 mutex_unlock(&dev->struct_mutex);
3883 /* Copy the updated relocations out regardless of current error
3884 * state. Failure to update the relocs would mean that the next
3885 * time userland calls execbuf, it would do so with presumed offset
3886 * state that didn't match the actual object state.
3888 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3891 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3897 drm_free_large(object_list);
3904 * Legacy execbuffer just creates an exec2 list from the original exec object
3905 * list array and passes it to the real function.
3908 i915_gem_execbuffer(struct drm_device *dev, void *data,
3909 struct drm_file *file_priv)
3911 struct drm_i915_gem_execbuffer *args = data;
3912 struct drm_i915_gem_execbuffer2 exec2;
3913 struct drm_i915_gem_exec_object *exec_list = NULL;
3914 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
3918 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3919 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3922 if (args->buffer_count < 1) {
3923 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3927 /* Copy in the exec list from userland */
3928 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
3929 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
3930 if (exec_list == NULL || exec2_list == NULL) {
3931 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
3932 args->buffer_count);
3933 drm_free_large(exec_list);
3934 drm_free_large(exec2_list);
3937 ret = copy_from_user(exec_list,
3938 (struct drm_i915_relocation_entry __user *)
3939 (uintptr_t) args->buffers_ptr,
3940 sizeof(*exec_list) * args->buffer_count);
3942 DRM_ERROR("copy %d exec entries failed %d\n",
3943 args->buffer_count, ret);
3944 drm_free_large(exec_list);
3945 drm_free_large(exec2_list);
3949 for (i = 0; i < args->buffer_count; i++) {
3950 exec2_list[i].handle = exec_list[i].handle;
3951 exec2_list[i].relocation_count = exec_list[i].relocation_count;
3952 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
3953 exec2_list[i].alignment = exec_list[i].alignment;
3954 exec2_list[i].offset = exec_list[i].offset;
3956 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
3958 exec2_list[i].flags = 0;
3961 exec2.buffers_ptr = args->buffers_ptr;
3962 exec2.buffer_count = args->buffer_count;
3963 exec2.batch_start_offset = args->batch_start_offset;
3964 exec2.batch_len = args->batch_len;
3965 exec2.DR1 = args->DR1;
3966 exec2.DR4 = args->DR4;
3967 exec2.num_cliprects = args->num_cliprects;
3968 exec2.cliprects_ptr = args->cliprects_ptr;
3969 exec2.flags = I915_EXEC_RENDER;
3971 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
3973 /* Copy the new buffer offsets back to the user's exec list. */
3974 for (i = 0; i < args->buffer_count; i++)
3975 exec_list[i].offset = exec2_list[i].offset;
3976 /* ... and back out to userspace */
3977 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3978 (uintptr_t) args->buffers_ptr,
3980 sizeof(*exec_list) * args->buffer_count);
3983 DRM_ERROR("failed to copy %d exec entries "
3984 "back to user (%d)\n",
3985 args->buffer_count, ret);
3989 drm_free_large(exec_list);
3990 drm_free_large(exec2_list);
3995 i915_gem_execbuffer2(struct drm_device *dev, void *data,
3996 struct drm_file *file_priv)
3998 struct drm_i915_gem_execbuffer2 *args = data;
3999 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4003 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4004 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4007 if (args->buffer_count < 1) {
4008 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4012 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4013 if (exec2_list == NULL) {
4014 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4015 args->buffer_count);
4018 ret = copy_from_user(exec2_list,
4019 (struct drm_i915_relocation_entry __user *)
4020 (uintptr_t) args->buffers_ptr,
4021 sizeof(*exec2_list) * args->buffer_count);
4023 DRM_ERROR("copy %d exec entries failed %d\n",
4024 args->buffer_count, ret);
4025 drm_free_large(exec2_list);
4029 ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4031 /* Copy the new buffer offsets back to the user's exec list. */
4032 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4033 (uintptr_t) args->buffers_ptr,
4035 sizeof(*exec2_list) * args->buffer_count);
4038 DRM_ERROR("failed to copy %d exec entries "
4039 "back to user (%d)\n",
4040 args->buffer_count, ret);
4044 drm_free_large(exec2_list);
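/*
 * Minimal userspace sketch of the execbuffer2 path (libdrm assumed; only the
 * fields consumed by i915_gem_do_execbuffer() are shown, the handles and
 * sizes are illustrative).  The batch object must be the last entry, since
 * the kernel takes object_list[buffer_count - 1] as the batch.
 *
 *	struct drm_i915_gem_exec_object2 objs[2] = {
 *		{ .handle = target_handle },
 *		{ .handle = batch_handle,
 *		  .relocation_count = 1,
 *		  .relocs_ptr = (uintptr_t)&reloc },
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)objs,
 *		.buffer_count = 2,
 *		.batch_len = batch_bytes,
 *		.flags = I915_EXEC_RENDER,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * Passing I915_EXEC_BSD instead selects the BSD ring and is rejected on
 * hardware without HAS_BSD().
 */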
4049 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4051 struct drm_device *dev = obj->dev;
4052 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4055 BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
4057 i915_verify_inactive(dev, __FILE__, __LINE__);
4059 if (obj_priv->gtt_space != NULL) {
4061 alignment = i915_gem_get_gtt_alignment(obj);
4062 if (obj_priv->gtt_offset & (alignment - 1)) {
4063 WARN(obj_priv->pin_count,
4064 "bo is already pinned with incorrect alignment:"
4065 " offset=%x, req.alignment=%x\n",
4066 obj_priv->gtt_offset, alignment);
4067 ret = i915_gem_object_unbind(obj);
4073 if (obj_priv->gtt_space == NULL) {
4074 ret = i915_gem_object_bind_to_gtt(obj, alignment);
4079 obj_priv->pin_count++;
4081 /* If the object is not active and not pending a flush,
4082 * remove it from the inactive list
4084 if (obj_priv->pin_count == 1) {
4085 atomic_inc(&dev->pin_count);
4086 atomic_add(obj->size, &dev->pin_memory);
4087 if (!obj_priv->active &&
4088 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
4089 list_del_init(&obj_priv->list);
4091 i915_verify_inactive(dev, __FILE__, __LINE__);
4097 i915_gem_object_unpin(struct drm_gem_object *obj)
4099 struct drm_device *dev = obj->dev;
4100 drm_i915_private_t *dev_priv = dev->dev_private;
4101 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4103 i915_verify_inactive(dev, __FILE__, __LINE__);
4104 obj_priv->pin_count--;
4105 BUG_ON(obj_priv->pin_count < 0);
4106 BUG_ON(obj_priv->gtt_space == NULL);
4108 /* If the object is no longer pinned, and is
4109 * neither active nor being flushed, then stick it on
4112 if (obj_priv->pin_count == 0) {
4113 if (!obj_priv->active &&
4114 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
4115 list_move_tail(&obj_priv->list,
4116 &dev_priv->mm.inactive_list);
4117 atomic_dec(&dev->pin_count);
4118 atomic_sub(obj->size, &dev->pin_memory);
4120 i915_verify_inactive(dev, __FILE__, __LINE__);
4124 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4125 struct drm_file *file_priv)
4127 struct drm_i915_gem_pin *args = data;
4128 struct drm_gem_object *obj;
4129 struct drm_i915_gem_object *obj_priv;
4132 mutex_lock(&dev->struct_mutex);
4134 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4136 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
4138 mutex_unlock(&dev->struct_mutex);
4141 obj_priv = to_intel_bo(obj);
4143 if (obj_priv->madv != I915_MADV_WILLNEED) {
4144 DRM_ERROR("Attempting to pin a purgeable buffer\n");
4145 drm_gem_object_unreference(obj);
4146 mutex_unlock(&dev->struct_mutex);
4150 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
4151 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
4153 drm_gem_object_unreference(obj);
4154 mutex_unlock(&dev->struct_mutex);
4158 obj_priv->user_pin_count++;
4159 obj_priv->pin_filp = file_priv;
4160 if (obj_priv->user_pin_count == 1) {
4161 ret = i915_gem_object_pin(obj, args->alignment);
4163 drm_gem_object_unreference(obj);
4164 mutex_unlock(&dev->struct_mutex);
4169 /* XXX - flush the CPU caches for pinned objects
4170 * as the X server doesn't manage domains yet
4172 i915_gem_object_flush_cpu_write_domain(obj);
4173 args->offset = obj_priv->gtt_offset;
4174 drm_gem_object_unreference(obj);
4175 mutex_unlock(&dev->struct_mutex);
4181 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4182 struct drm_file *file_priv)
4184 struct drm_i915_gem_pin *args = data;
4185 struct drm_gem_object *obj;
4186 struct drm_i915_gem_object *obj_priv;
4188 mutex_lock(&dev->struct_mutex);
4190 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4192 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
4194 mutex_unlock(&dev->struct_mutex);
4198 obj_priv = to_intel_bo(obj);
4199 if (obj_priv->pin_filp != file_priv) {
4200 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4202 drm_gem_object_unreference(obj);
4203 mutex_unlock(&dev->struct_mutex);
4206 obj_priv->user_pin_count--;
4207 if (obj_priv->user_pin_count == 0) {
4208 obj_priv->pin_filp = NULL;
4209 i915_gem_object_unpin(obj);
4212 drm_gem_object_unreference(obj);
4213 mutex_unlock(&dev->struct_mutex);
4218 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4219 struct drm_file *file_priv)
4221 struct drm_i915_gem_busy *args = data;
4222 struct drm_gem_object *obj;
4223 struct drm_i915_gem_object *obj_priv;
4225 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4227 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
4232 mutex_lock(&dev->struct_mutex);
4234 /* Count all active objects as busy, even if they are currently not used
4235 * by the gpu. Users of this interface expect objects to eventually
4236 * become non-busy without any further actions, therefore emit any
4237 * necessary flushes here.
4239 obj_priv = to_intel_bo(obj);
4240 args->busy = obj_priv->active;
4242 /* Unconditionally flush objects, even when the gpu still uses this
4243 * object. Userspace calling this function indicates that it wants to
4244 * use this buffer rather sooner than later, so issuing the required
4245 * flush earlier is beneficial.
4247 if (obj->write_domain) {
4248 i915_gem_flush(dev, 0, obj->write_domain);
4249 (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
4252 /* Update the active list for the hardware's current position.
4253 * Otherwise this only updates on a delayed timer or when irqs
4254 * are actually unmasked, and our working set ends up being
4255 * larger than required.
4257 i915_gem_retire_requests_ring(dev, obj_priv->ring);
4259 args->busy = obj_priv->active;
4262 drm_gem_object_unreference(obj);
4263 mutex_unlock(&dev->struct_mutex);
4268 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4269 struct drm_file *file_priv)
4271 return i915_gem_ring_throttle(dev, file_priv);
4275 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4276 struct drm_file *file_priv)
4278 struct drm_i915_gem_madvise *args = data;
4279 struct drm_gem_object *obj;
4280 struct drm_i915_gem_object *obj_priv;
4282 switch (args->madv) {
4283 case I915_MADV_DONTNEED:
4284 case I915_MADV_WILLNEED:
4290 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4292 DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
4297 mutex_lock(&dev->struct_mutex);
4298 obj_priv = to_intel_bo(obj);
4300 if (obj_priv->pin_count) {
4301 drm_gem_object_unreference(obj);
4302 mutex_unlock(&dev->struct_mutex);
4304 DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
4308 if (obj_priv->madv != __I915_MADV_PURGED)
4309 obj_priv->madv = args->madv;
4311 /* if the object is no longer bound, discard its backing storage */
4312 if (i915_gem_object_is_purgeable(obj_priv) &&
4313 obj_priv->gtt_space == NULL)
4314 i915_gem_object_truncate(obj);
4316 args->retained = obj_priv->madv != __I915_MADV_PURGED;
4318 drm_gem_object_unreference(obj);
4319 mutex_unlock(&dev->struct_mutex);
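/*
 * Userspace sketch of the madvise protocol implemented above (libdrm assumed;
 * illustration only): a buffer cache marks idle buffers purgeable and must
 * check `retained` when taking one back, because the kernel may have dropped
 * the backing pages in the meantime.
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		reallocate the buffer, its contents are gone;
 */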
4324 struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
4327 struct drm_i915_gem_object *obj;
4329 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
4333 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4338 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4339 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4341 obj->agp_type = AGP_USER_MEMORY;
4342 obj->base.driver_private = NULL;
4343 obj->fence_reg = I915_FENCE_REG_NONE;
4344 INIT_LIST_HEAD(&obj->list);
4345 INIT_LIST_HEAD(&obj->gpu_write_list);
4346 obj->madv = I915_MADV_WILLNEED;
4348 trace_i915_gem_object_create(&obj->base);
4353 int i915_gem_init_object(struct drm_gem_object *obj)
4360 static void i915_gem_free_object_tail(struct drm_gem_object *obj)
4362 struct drm_device *dev = obj->dev;
4363 drm_i915_private_t *dev_priv = dev->dev_private;
4364 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4367 ret = i915_gem_object_unbind(obj);
4368 if (ret == -ERESTARTSYS) {
4369 list_move(&obj_priv->list,
4370 &dev_priv->mm.deferred_free_list);
4374 if (obj_priv->mmap_offset)
4375 i915_gem_free_mmap_offset(obj);
4377 drm_gem_object_release(obj);
4379 kfree(obj_priv->page_cpu_valid);
4380 kfree(obj_priv->bit_17);
4384 void i915_gem_free_object(struct drm_gem_object *obj)
4386 struct drm_device *dev = obj->dev;
4387 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4389 trace_i915_gem_object_destroy(obj);
4391 while (obj_priv->pin_count > 0)
4392 i915_gem_object_unpin(obj);
4394 if (obj_priv->phys_obj)
4395 i915_gem_detach_phys_object(dev, obj);
4397 i915_gem_free_object_tail(obj);
4401 i915_gem_idle(struct drm_device *dev)
4403 drm_i915_private_t *dev_priv = dev->dev_private;
4406 mutex_lock(&dev->struct_mutex);
4408 if (dev_priv->mm.suspended ||
4409 (dev_priv->render_ring.gem_object == NULL) ||
4411 dev_priv->bsd_ring.gem_object == NULL)) {
4412 mutex_unlock(&dev->struct_mutex);
4416 ret = i915_gpu_idle(dev);
4418 mutex_unlock(&dev->struct_mutex);
4422 /* Under UMS, be paranoid and evict. */
4423 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
4424 ret = i915_gem_evict_inactive(dev);
4426 mutex_unlock(&dev->struct_mutex);
4431 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4432 * We need to replace this with a semaphore, or something.
4433 * And not confound mm.suspended!
4435 dev_priv->mm.suspended = 1;
4436 del_timer(&dev_priv->hangcheck_timer);
4438 i915_kernel_lost_context(dev);
4439 i915_gem_cleanup_ringbuffer(dev);
4441 mutex_unlock(&dev->struct_mutex);
4443 /* Cancel the retire work handler, which should be idle now. */
4444 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4450 * 965+ support PIPE_CONTROL commands, which provide finer grained control
4451 * over cache flushing.
4454 i915_gem_init_pipe_control(struct drm_device *dev)
4456 drm_i915_private_t *dev_priv = dev->dev_private;
4457 struct drm_gem_object *obj;
4458 struct drm_i915_gem_object *obj_priv;
4461 obj = i915_gem_alloc_object(dev, 4096);
4463 DRM_ERROR("Failed to allocate seqno page\n");
4467 obj_priv = to_intel_bo(obj);
4468 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4470 ret = i915_gem_object_pin(obj, 4096);
4474 dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4475 dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4476 if (dev_priv->seqno_page == NULL)
4479 dev_priv->seqno_obj = obj;
4480 memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4485 i915_gem_object_unpin(obj);
4487 drm_gem_object_unreference(obj);
4494 i915_gem_cleanup_pipe_control(struct drm_device *dev)
4496 drm_i915_private_t *dev_priv = dev->dev_private;
4497 struct drm_gem_object *obj;
4498 struct drm_i915_gem_object *obj_priv;
4500 obj = dev_priv->seqno_obj;
4501 obj_priv = to_intel_bo(obj);
4502 kunmap(obj_priv->pages[0]);
4503 i915_gem_object_unpin(obj);
4504 drm_gem_object_unreference(obj);
4505 dev_priv->seqno_obj = NULL;
4507 dev_priv->seqno_page = NULL;
4511 i915_gem_init_ringbuffer(struct drm_device *dev)
4513 drm_i915_private_t *dev_priv = dev->dev_private;
4516 dev_priv->render_ring = render_ring;
4518 if (!I915_NEED_GFX_HWS(dev)) {
4519 dev_priv->render_ring.status_page.page_addr
4520 = dev_priv->status_page_dmah->vaddr;
4521 memset(dev_priv->render_ring.status_page.page_addr,
4525 if (HAS_PIPE_CONTROL(dev)) {
4526 ret = i915_gem_init_pipe_control(dev);
4531 ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
4533 goto cleanup_pipe_control;
4536 dev_priv->bsd_ring = bsd_ring;
4537 ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
4539 goto cleanup_render_ring;
4542 dev_priv->next_seqno = 1;
4546 cleanup_render_ring:
4547 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4548 cleanup_pipe_control:
4549 if (HAS_PIPE_CONTROL(dev))
4550 i915_gem_cleanup_pipe_control(dev);
4555 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4557 drm_i915_private_t *dev_priv = dev->dev_private;
4559 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4561 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
4562 if (HAS_PIPE_CONTROL(dev))
4563 i915_gem_cleanup_pipe_control(dev);
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->mm.wedged, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	spin_lock(&dev_priv->mm.active_list_lock);
	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
	spin_unlock(&dev_priv->mm.active_list_lock);

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
	mutex_unlock(&dev->struct_mutex);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;

	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->mm.suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);
	return i915_gem_idle(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock_init(&dev_priv->mm.active_list_lock);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
	if (HAS_BSD(dev)) {
		INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
		INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
	}
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	spin_lock(&shrink_list_lock);
	list_add(&dev_priv->mm.shrink_list, &shrink_list);
	spin_unlock(&shrink_list_lock);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		u32 tmp = I915_READ(MI_ARB_STATE);
		if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
			/* arb state is a masked write, so set bit + bit in mask */
			tmp = MI_ARB_C3_LP_WRITE_ENABLE |
				(MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
			I915_WRITE(MI_ARB_STATE, tmp);
		}
	}

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	if (IS_I965G(dev)) {
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
	} else {
		for (i = 0; i < 8; i++)
			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
	}
	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);
}
/*
 * Create a physically contiguous memory object for this object,
 * e.g. for cursor + overlay regs.
 */
int i915_gem_init_phys_object(struct drm_device *dev,
			      int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;
	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);

	dev_priv->mm.phys_objs[id - 1] = phys_obj;
	return 0;

kfree_obj:
	kfree(phys_obj);
	return ret;
}

void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj)
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);

	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;
	int i;
	int ret;
	int page_count;

	obj_priv = to_intel_bo(obj);
	if (!obj_priv->phys_obj)
		return;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret)
		goto out;

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
	}
	drm_clflush_pages(obj_priv->pages, page_count);
	drm_agp_chipset_flush(dev);

	i915_gem_object_put_pages(obj);
out:
	obj_priv->phys_obj->cur_obj = NULL;
	obj_priv->phys_obj = NULL;
}
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_gem_object *obj,
			    int id,
			    int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	obj_priv = to_intel_bo(obj);

	if (obj_priv->phys_obj) {
		if (obj_priv->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
			goto out;
		}
	}

	/* bind to the object */
	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj_priv->phys_obj->cur_obj = obj;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret) {
		DRM_ERROR("failed to get page list\n");
		goto out;
	}

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
	}

	i915_gem_object_put_pages(obj);

	return 0;
out:
	return ret;
}
static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
	ret = copy_from_user(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	drm_agp_chipset_flush(dev);
	return 0;
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list))
		list_del_init(i915_file_priv->mm.request_list.next);
	mutex_unlock(&dev->struct_mutex);
}
static int
i915_gpu_is_active(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int lists_empty;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->render_ring.active_list);
	if (HAS_BSD(dev))
		lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);

	return !lists_empty;
}
static int
i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	drm_i915_private_t *dev_priv, *next_dev;
	struct drm_i915_gem_object *obj_priv, *next_obj;
	int cnt = 0;
	int would_deadlock = 1;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		spin_lock(&shrink_list_lock);
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (mutex_trylock(&dev->struct_mutex)) {
				list_for_each_entry(obj_priv,
						    &dev_priv->mm.inactive_list,
						    list)
					cnt++;
				mutex_unlock(&dev->struct_mutex);
			}
		}
		spin_unlock(&shrink_list_lock);

		return (cnt / 100) * sysctl_vfs_cache_pressure;
	}

	spin_lock(&shrink_list_lock);

rescan:
	/* first scan for clean buffers */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);
		i915_gem_retire_requests(dev);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (i915_gem_object_is_purgeable(obj_priv)) {
				i915_gem_object_unbind(&obj_priv->base);
				if (--nr_to_scan <= 0)
					break;
			}
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;

		if (nr_to_scan <= 0)
			break;
	}

	/* second pass, evict/count anything still on the inactive list */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (nr_to_scan > 0) {
				i915_gem_object_unbind(&obj_priv->base);
				nr_to_scan--;
			} else
				cnt++;
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;
	}

	if (nr_to_scan) {
		int active = 0;

		/*
		 * We are desperate for pages, so as a last resort, wait
		 * for the GPU to finish and discard whatever we can.
		 * This has a dramatic impact to reduce the number of
		 * OOM-killer events whilst running the GPU aggressively.
		 */
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (!mutex_trylock(&dev->struct_mutex))
				continue;

			spin_unlock(&shrink_list_lock);

			if (i915_gpu_is_active(dev)) {
				i915_gpu_idle(dev);
				active++;
			}

			spin_lock(&shrink_list_lock);
			mutex_unlock(&dev->struct_mutex);
		}

		if (active)
			goto rescan;
	}

	spin_unlock(&shrink_list_lock);

	if (would_deadlock)
		return -1;

	if (cnt < 0)
		cnt = 0;

	return (cnt / 100) * sysctl_vfs_cache_pressure;
}
static struct shrinker shrinker = {
	.shrink = i915_gem_shrink,
	.seeks = DEFAULT_SEEKS,
};

__init void
i915_gem_shrinker_init(void)
{
	register_shrinker(&shrinker);
}

__exit void
i915_gem_shrinker_exit(void)
{
	unregister_shrinker(&shrinker);
}
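
/*
 * Usage sketch (not part of this file): the two helpers above are expected
 * to be paired from the driver's module init/exit path, along the lines of
 * the snippet below.  The exact wiring lives in i915_drv.c and can differ
 * between kernel versions, so treat this as an illustrative assumption
 * rather than a definitive reference.
 *
 *	static int __init i915_init(void)
 *	{
 *		i915_gem_shrinker_init();
 *		return drm_init(&driver);
 *	}
 *
 *	static void __exit i915_exit(void)
 *	{
 *		drm_exit(&driver);
 *		i915_gem_shrinker_exit();
 *	}
 */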