drm/i915: Propagate error from i915_gem_object_flush_gpu_write_domain()
drivers/gpu/drm/i915/i915_gem.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include "drmP.h"
29 #include "drm.h"
30 #include "i915_drm.h"
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/slab.h>
35 #include <linux/swap.h>
36 #include <linux/pci.h>
37
38 static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
39 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
40 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
41 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
42                                              int write);
43 static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
44                                                      uint64_t offset,
45                                                      uint64_t size);
46 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
47 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
48 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
49                                            unsigned alignment);
50 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
51 static int i915_gem_evict_something(struct drm_device *dev, int min_size);
52 static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
53 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
54                                 struct drm_i915_gem_pwrite *args,
55                                 struct drm_file *file_priv);
56
57 static LIST_HEAD(shrink_list);
58 static DEFINE_SPINLOCK(shrink_list_lock);
59
60 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
61                      unsigned long end)
62 {
63         drm_i915_private_t *dev_priv = dev->dev_private;
64
65         if (start >= end ||
66             (start & (PAGE_SIZE - 1)) != 0 ||
67             (end & (PAGE_SIZE - 1)) != 0) {
68                 return -EINVAL;
69         }
70
71         drm_mm_init(&dev_priv->mm.gtt_space, start,
72                     end - start);
73
74         dev->gtt_total = (uint32_t) (end - start);
75
76         return 0;
77 }
78
79 int
80 i915_gem_init_ioctl(struct drm_device *dev, void *data,
81                     struct drm_file *file_priv)
82 {
83         struct drm_i915_gem_init *args = data;
84         int ret;
85
86         mutex_lock(&dev->struct_mutex);
87         ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
88         mutex_unlock(&dev->struct_mutex);
89
90         return ret;
91 }
92
93 int
94 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
95                             struct drm_file *file_priv)
96 {
97         struct drm_i915_gem_get_aperture *args = data;
98
99         if (!(dev->driver->driver_features & DRIVER_GEM))
100                 return -ENODEV;
101
102         args->aper_size = dev->gtt_total;
103         args->aper_available_size = (args->aper_size -
104                                      atomic_read(&dev->pin_memory));
105
106         return 0;
107 }
108
109
110 /**
111  * Creates a new mm object and returns a handle to it.
112  */
113 int
114 i915_gem_create_ioctl(struct drm_device *dev, void *data,
115                       struct drm_file *file_priv)
116 {
117         struct drm_i915_gem_create *args = data;
118         struct drm_gem_object *obj;
119         int ret;
120         u32 handle;
121
122         args->size = roundup(args->size, PAGE_SIZE);
123
124         /* Allocate the new object */
125         obj = i915_gem_alloc_object(dev, args->size);
126         if (obj == NULL)
127                 return -ENOMEM;
128
129         ret = drm_gem_handle_create(file_priv, obj, &handle);
130         drm_gem_object_handle_unreference_unlocked(obj);
131
132         if (ret)
133                 return ret;
134
135         args->handle = handle;
136
137         return 0;
138 }
139
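/*
 * Illustrative userspace sketch (an editorial example, not part of the
 * driver): reaching the create ioctl above through libdrm's drmIoctl().
 * The helper name and include paths are assumptions; the ioctl number and
 * struct drm_i915_gem_create are the real UAPI.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int example_gem_create(int fd, uint64_t size, uint32_t *handle)
{
	struct drm_i915_gem_create create;

	memset(&create, 0, sizeof(create));
	create.size = size;	/* the ioctl handler rounds this up to PAGE_SIZE */

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
		return -errno;

	*handle = create.handle;
	return 0;
}
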
140 static inline int
141 fast_shmem_read(struct page **pages,
142                 loff_t page_base, int page_offset,
143                 char __user *data,
144                 int length)
145 {
146         char *vaddr;
147         int unwritten;
148
149         vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
150         if (vaddr == NULL)
151                 return -ENOMEM;
152         unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
153         kunmap_atomic(vaddr, KM_USER0);
154
155         if (unwritten)
156                 return -EFAULT;
157
158         return 0;
159 }
160
161 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
162 {
163         drm_i915_private_t *dev_priv = obj->dev->dev_private;
164         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
165
166         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
167                 obj_priv->tiling_mode != I915_TILING_NONE;
168 }
169
170 static inline void
171 slow_shmem_copy(struct page *dst_page,
172                 int dst_offset,
173                 struct page *src_page,
174                 int src_offset,
175                 int length)
176 {
177         char *dst_vaddr, *src_vaddr;
178
179         dst_vaddr = kmap(dst_page);
180         src_vaddr = kmap(src_page);
181
182         memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
183
184         kunmap(src_page);
185         kunmap(dst_page);
186 }
187
188 static inline void
189 slow_shmem_bit17_copy(struct page *gpu_page,
190                       int gpu_offset,
191                       struct page *cpu_page,
192                       int cpu_offset,
193                       int length,
194                       int is_read)
195 {
196         char *gpu_vaddr, *cpu_vaddr;
197
198         /* Use the unswizzled path if this page isn't affected. */
199         if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
200                 if (is_read)
201                         return slow_shmem_copy(cpu_page, cpu_offset,
202                                                gpu_page, gpu_offset, length);
203                 else
204                         return slow_shmem_copy(gpu_page, gpu_offset,
205                                                cpu_page, cpu_offset, length);
206         }
207
208         gpu_vaddr = kmap(gpu_page);
209         cpu_vaddr = kmap(cpu_page);
210
211         /* Copy the data, XORing A6 with A17 (1). The user already knows he's
212          * XORing with the other bits (A9 for Y, A9 and A10 for X)
213          */
214         while (length > 0) {
215                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
216                 int this_length = min(cacheline_end - gpu_offset, length);
217                 int swizzled_gpu_offset = gpu_offset ^ 64;
218
219                 if (is_read) {
220                         memcpy(cpu_vaddr + cpu_offset,
221                                gpu_vaddr + swizzled_gpu_offset,
222                                this_length);
223                 } else {
224                         memcpy(gpu_vaddr + swizzled_gpu_offset,
225                                cpu_vaddr + cpu_offset,
226                                this_length);
227                 }
228                 cpu_offset += this_length;
229                 gpu_offset += this_length;
230                 length -= this_length;
231         }
232
233         kunmap(cpu_page);
234         kunmap(gpu_page);
235 }
236
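/*
 * Worked example for the swizzle above: when the GPU page's physical address
 * has bit 17 set, swizzled_gpu_offset = gpu_offset ^ 64 simply flips bit 6 of
 * the offset, so the loop exchanges neighbouring 64-byte cachelines -- byte
 * 0x000 is copied from/to 0x040, 0x080 pairs with 0x0c0, and so on across
 * the page.
 */
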
237 /**
238  * This is the fast shmem pread path, which attempts to copy_to_user directly
239  * from the backing pages of the object into the user's address space.  On a
240  * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
241  */
242 static int
243 i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
244                           struct drm_i915_gem_pread *args,
245                           struct drm_file *file_priv)
246 {
247         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
248         ssize_t remain;
249         loff_t offset, page_base;
250         char __user *user_data;
251         int page_offset, page_length;
252         int ret;
253
254         user_data = (char __user *) (uintptr_t) args->data_ptr;
255         remain = args->size;
256
257         mutex_lock(&dev->struct_mutex);
258
259         ret = i915_gem_object_get_pages(obj, 0);
260         if (ret != 0)
261                 goto fail_unlock;
262
263         ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
264                                                         args->size);
265         if (ret != 0)
266                 goto fail_put_pages;
267
268         obj_priv = to_intel_bo(obj);
269         offset = args->offset;
270
271         while (remain > 0) {
272                 /* Operation in this page
273                  *
274                  * page_base = page offset within the object
275                  * page_offset = offset within page
276                  * page_length = bytes to copy for this page
277                  */
278                 page_base = (offset & ~(PAGE_SIZE-1));
279                 page_offset = offset & (PAGE_SIZE-1);
280                 page_length = remain;
281                 if ((page_offset + remain) > PAGE_SIZE)
282                         page_length = PAGE_SIZE - page_offset;
283
284                 ret = fast_shmem_read(obj_priv->pages,
285                                       page_base, page_offset,
286                                       user_data, page_length);
287                 if (ret)
288                         goto fail_put_pages;
289
290                 remain -= page_length;
291                 user_data += page_length;
292                 offset += page_length;
293         }
294
295 fail_put_pages:
296         i915_gem_object_put_pages(obj);
297 fail_unlock:
298         mutex_unlock(&dev->struct_mutex);
299
300         return ret;
301 }
302
303 static int
304 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
305 {
306         int ret;
307
308         ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
309
310         /* If we've insufficient memory to map in the pages, attempt
311          * to make some space by throwing out some old buffers.
312          */
313         if (ret == -ENOMEM) {
314                 struct drm_device *dev = obj->dev;
315
316                 ret = i915_gem_evict_something(dev, obj->size);
317                 if (ret)
318                         return ret;
319
320                 ret = i915_gem_object_get_pages(obj, 0);
321         }
322
323         return ret;
324 }
325
326 /**
327  * This is the fallback shmem pread path, which uses get_user_pages to pin
328  * the destination pages in the user's address space, so we can copy out of
329  * the object's backing pages while holding the struct_mutex without taking
330  * page faults.
331  */
332 static int
333 i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
334                           struct drm_i915_gem_pread *args,
335                           struct drm_file *file_priv)
336 {
337         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
338         struct mm_struct *mm = current->mm;
339         struct page **user_pages;
340         ssize_t remain;
341         loff_t offset, pinned_pages, i;
342         loff_t first_data_page, last_data_page, num_pages;
343         int shmem_page_index, shmem_page_offset;
344         int data_page_index,  data_page_offset;
345         int page_length;
346         int ret;
347         uint64_t data_ptr = args->data_ptr;
348         int do_bit17_swizzling;
349
350         remain = args->size;
351
352         /* Pin the user pages containing the data.  We can't fault while
353          * holding the struct mutex, yet we want to hold it while
354          * dereferencing the user data.
355          */
356         first_data_page = data_ptr / PAGE_SIZE;
357         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
358         num_pages = last_data_page - first_data_page + 1;
359
360         user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
361         if (user_pages == NULL)
362                 return -ENOMEM;
363
364         down_read(&mm->mmap_sem);
365         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
366                                       num_pages, 1, 0, user_pages, NULL);
367         up_read(&mm->mmap_sem);
368         if (pinned_pages < num_pages) {
369                 ret = -EFAULT;
370                 goto fail_put_user_pages;
371         }
372
373         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
374
375         mutex_lock(&dev->struct_mutex);
376
377         ret = i915_gem_object_get_pages_or_evict(obj);
378         if (ret)
379                 goto fail_unlock;
380
381         ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
382                                                         args->size);
383         if (ret != 0)
384                 goto fail_put_pages;
385
386         obj_priv = to_intel_bo(obj);
387         offset = args->offset;
388
389         while (remain > 0) {
390                 /* Operation in this page
391                  *
392                  * shmem_page_index = page number within shmem file
393                  * shmem_page_offset = offset within page in shmem file
394                  * data_page_index = page number in get_user_pages return
395                  * data_page_offset = offset within data_page_index page.
396                  * page_length = bytes to copy for this page
397                  */
398                 shmem_page_index = offset / PAGE_SIZE;
399                 shmem_page_offset = offset & ~PAGE_MASK;
400                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
401                 data_page_offset = data_ptr & ~PAGE_MASK;
402
403                 page_length = remain;
404                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
405                         page_length = PAGE_SIZE - shmem_page_offset;
406                 if ((data_page_offset + page_length) > PAGE_SIZE)
407                         page_length = PAGE_SIZE - data_page_offset;
408
409                 if (do_bit17_swizzling) {
410                         slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
411                                               shmem_page_offset,
412                                               user_pages[data_page_index],
413                                               data_page_offset,
414                                               page_length,
415                                               1);
416                 } else {
417                         slow_shmem_copy(user_pages[data_page_index],
418                                         data_page_offset,
419                                         obj_priv->pages[shmem_page_index],
420                                         shmem_page_offset,
421                                         page_length);
422                 }
423
424                 remain -= page_length;
425                 data_ptr += page_length;
426                 offset += page_length;
427         }
428
429 fail_put_pages:
430         i915_gem_object_put_pages(obj);
431 fail_unlock:
432         mutex_unlock(&dev->struct_mutex);
433 fail_put_user_pages:
434         for (i = 0; i < pinned_pages; i++) {
435                 SetPageDirty(user_pages[i]);
436                 page_cache_release(user_pages[i]);
437         }
438         drm_free_large(user_pages);
439
440         return ret;
441 }
442
443 /**
444  * Reads data from the object referenced by handle.
445  *
446  * On error, the contents of *data are undefined.
447  */
448 int
449 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
450                      struct drm_file *file_priv)
451 {
452         struct drm_i915_gem_pread *args = data;
453         struct drm_gem_object *obj;
454         struct drm_i915_gem_object *obj_priv;
455         int ret;
456
457         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
458         if (obj == NULL)
459                 return -EBADF;
460         obj_priv = to_intel_bo(obj);
461
462         /* Bounds check source.
463          *
464          * XXX: This could use review for overflow issues...
465          */
466         if (args->offset > obj->size || args->size > obj->size ||
467             args->offset + args->size > obj->size) {
468                 drm_gem_object_unreference_unlocked(obj);
469                 return -EINVAL;
470         }
471
472         if (i915_gem_object_needs_bit17_swizzle(obj)) {
473                 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
474         } else {
475                 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
476                 if (ret != 0)
477                         ret = i915_gem_shmem_pread_slow(dev, obj, args,
478                                                         file_priv);
479         }
480
481         drm_gem_object_unreference_unlocked(obj);
482
483         return ret;
484 }
485
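/*
 * Illustrative userspace sketch of the pread ioctl handled above; the helper
 * name and includes are assumptions, the ioctl and struct are the real UAPI.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int example_gem_pread(int fd, uint32_t handle, uint64_t offset,
			     void *dst, uint64_t len)
{
	struct drm_i915_gem_pread pread;

	memset(&pread, 0, sizeof(pread));
	pread.handle = handle;
	pread.offset = offset;		/* byte offset into the object */
	pread.size = len;
	pread.data_ptr = (uintptr_t)dst;

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread) ? -errno : 0;
}
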
486 /* This is the fast write path which cannot handle
487  * page faults in the source data
488  */
489
490 static inline int
491 fast_user_write(struct io_mapping *mapping,
492                 loff_t page_base, int page_offset,
493                 char __user *user_data,
494                 int length)
495 {
496         char *vaddr_atomic;
497         unsigned long unwritten;
498
499         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
500         unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
501                                                       user_data, length);
502         io_mapping_unmap_atomic(vaddr_atomic);
503         if (unwritten)
504                 return -EFAULT;
505         return 0;
506 }
507
508 /* Here's the write path which can sleep for
509  * page faults
510  */
511
512 static inline void
513 slow_kernel_write(struct io_mapping *mapping,
514                   loff_t gtt_base, int gtt_offset,
515                   struct page *user_page, int user_offset,
516                   int length)
517 {
518         char __iomem *dst_vaddr;
519         char *src_vaddr;
520
521         dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
522         src_vaddr = kmap(user_page);
523
524         memcpy_toio(dst_vaddr + gtt_offset,
525                     src_vaddr + user_offset,
526                     length);
527
528         kunmap(user_page);
529         io_mapping_unmap(dst_vaddr);
530 }
531
532 static inline int
533 fast_shmem_write(struct page **pages,
534                  loff_t page_base, int page_offset,
535                  char __user *data,
536                  int length)
537 {
538         char *vaddr;
539         unsigned long unwritten;
540
541         vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
542         if (vaddr == NULL)
543                 return -ENOMEM;
544         unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
545         kunmap_atomic(vaddr, KM_USER0);
546
547         if (unwritten)
548                 return -EFAULT;
549         return 0;
550 }
551
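/*
 * Note on the fast helpers above (fast_shmem_read, fast_user_write,
 * fast_shmem_write): the __copy_*_inatomic* routines return the number of
 * bytes left uncopied, so any non-zero value means we hit an unfaulted page
 * and the caller must bounce to the corresponding slow path.
 */
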
552 /**
553  * This is the fast pwrite path, where we copy the data directly from the
554  * user into the GTT, uncached.
555  */
556 static int
557 i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
558                          struct drm_i915_gem_pwrite *args,
559                          struct drm_file *file_priv)
560 {
561         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
562         drm_i915_private_t *dev_priv = dev->dev_private;
563         ssize_t remain;
564         loff_t offset, page_base;
565         char __user *user_data;
566         int page_offset, page_length;
567         int ret;
568
569         user_data = (char __user *) (uintptr_t) args->data_ptr;
570         remain = args->size;
571         if (!access_ok(VERIFY_READ, user_data, remain))
572                 return -EFAULT;
573
574
575         mutex_lock(&dev->struct_mutex);
576         ret = i915_gem_object_pin(obj, 0);
577         if (ret) {
578                 mutex_unlock(&dev->struct_mutex);
579                 return ret;
580         }
581         ret = i915_gem_object_set_to_gtt_domain(obj, 1);
582         if (ret)
583                 goto fail;
584
585         obj_priv = to_intel_bo(obj);
586         offset = obj_priv->gtt_offset + args->offset;
587
588         while (remain > 0) {
589                 /* Operation in this page
590                  *
591                  * page_base = page offset within aperture
592                  * page_offset = offset within page
593                  * page_length = bytes to copy for this page
594                  */
595                 page_base = (offset & ~(PAGE_SIZE-1));
596                 page_offset = offset & (PAGE_SIZE-1);
597                 page_length = remain;
598                 if ((page_offset + remain) > PAGE_SIZE)
599                         page_length = PAGE_SIZE - page_offset;
600
601                 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
602                                        page_offset, user_data, page_length);
603
604                 /* If we get a fault while copying data, then (presumably) our
605                  * source page isn't available.  Return the error and we'll
606                  * retry in the slow path.
607                  */
608                 if (ret)
609                         goto fail;
610
611                 remain -= page_length;
612                 user_data += page_length;
613                 offset += page_length;
614         }
615
616 fail:
617         i915_gem_object_unpin(obj);
618         mutex_unlock(&dev->struct_mutex);
619
620         return ret;
621 }
622
623 /**
624  * This is the fallback GTT pwrite path, which uses get_user_pages to pin
625  * the user pages and maps them with kmap for copying.
626  *
627  * This code resulted in x11perf -rgb10text consuming about 10% more CPU
628  * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
629  */
630 static int
631 i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
632                          struct drm_i915_gem_pwrite *args,
633                          struct drm_file *file_priv)
634 {
635         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
636         drm_i915_private_t *dev_priv = dev->dev_private;
637         ssize_t remain;
638         loff_t gtt_page_base, offset;
639         loff_t first_data_page, last_data_page, num_pages;
640         loff_t pinned_pages, i;
641         struct page **user_pages;
642         struct mm_struct *mm = current->mm;
643         int gtt_page_offset, data_page_offset, data_page_index, page_length;
644         int ret;
645         uint64_t data_ptr = args->data_ptr;
646
647         remain = args->size;
648
649         /* Pin the user pages containing the data.  We can't fault while
650          * holding the struct mutex, and all of the pwrite implementations
651          * want to hold it while dereferencing the user data.
652          */
653         first_data_page = data_ptr / PAGE_SIZE;
654         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
655         num_pages = last_data_page - first_data_page + 1;
656
657         user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
658         if (user_pages == NULL)
659                 return -ENOMEM;
660
661         down_read(&mm->mmap_sem);
662         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
663                                       num_pages, 0, 0, user_pages, NULL);
664         up_read(&mm->mmap_sem);
665         if (pinned_pages < num_pages) {
666                 ret = -EFAULT;
667                 goto out_unpin_pages;
668         }
669
670         mutex_lock(&dev->struct_mutex);
671         ret = i915_gem_object_pin(obj, 0);
672         if (ret)
673                 goto out_unlock;
674
675         ret = i915_gem_object_set_to_gtt_domain(obj, 1);
676         if (ret)
677                 goto out_unpin_object;
678
679         obj_priv = to_intel_bo(obj);
680         offset = obj_priv->gtt_offset + args->offset;
681
682         while (remain > 0) {
683                 /* Operation in this page
684                  *
685                  * gtt_page_base = page offset within aperture
686                  * gtt_page_offset = offset within page in aperture
687                  * data_page_index = page number in get_user_pages return
688                  * data_page_offset = offset within data_page_index page.
689                  * page_length = bytes to copy for this page
690                  */
691                 gtt_page_base = offset & PAGE_MASK;
692                 gtt_page_offset = offset & ~PAGE_MASK;
693                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
694                 data_page_offset = data_ptr & ~PAGE_MASK;
695
696                 page_length = remain;
697                 if ((gtt_page_offset + page_length) > PAGE_SIZE)
698                         page_length = PAGE_SIZE - gtt_page_offset;
699                 if ((data_page_offset + page_length) > PAGE_SIZE)
700                         page_length = PAGE_SIZE - data_page_offset;
701
702                 slow_kernel_write(dev_priv->mm.gtt_mapping,
703                                   gtt_page_base, gtt_page_offset,
704                                   user_pages[data_page_index],
705                                   data_page_offset,
706                                   page_length);
707
708                 remain -= page_length;
709                 offset += page_length;
710                 data_ptr += page_length;
711         }
712
713 out_unpin_object:
714         i915_gem_object_unpin(obj);
715 out_unlock:
716         mutex_unlock(&dev->struct_mutex);
717 out_unpin_pages:
718         for (i = 0; i < pinned_pages; i++)
719                 page_cache_release(user_pages[i]);
720         drm_free_large(user_pages);
721
722         return ret;
723 }
724
725 /**
726  * This is the fast shmem pwrite path, which attempts to directly
727  * copy_from_user into the kmapped pages backing the object.
728  */
729 static int
730 i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
731                            struct drm_i915_gem_pwrite *args,
732                            struct drm_file *file_priv)
733 {
734         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
735         ssize_t remain;
736         loff_t offset, page_base;
737         char __user *user_data;
738         int page_offset, page_length;
739         int ret;
740
741         user_data = (char __user *) (uintptr_t) args->data_ptr;
742         remain = args->size;
743
744         mutex_lock(&dev->struct_mutex);
745
746         ret = i915_gem_object_get_pages(obj, 0);
747         if (ret != 0)
748                 goto fail_unlock;
749
750         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
751         if (ret != 0)
752                 goto fail_put_pages;
753
754         obj_priv = to_intel_bo(obj);
755         offset = args->offset;
756         obj_priv->dirty = 1;
757
758         while (remain > 0) {
759                 /* Operation in this page
760                  *
761                  * page_base = page offset within the object
762                  * page_offset = offset within page
763                  * page_length = bytes to copy for this page
764                  */
765                 page_base = (offset & ~(PAGE_SIZE-1));
766                 page_offset = offset & (PAGE_SIZE-1);
767                 page_length = remain;
768                 if ((page_offset + remain) > PAGE_SIZE)
769                         page_length = PAGE_SIZE - page_offset;
770
771                 ret = fast_shmem_write(obj_priv->pages,
772                                        page_base, page_offset,
773                                        user_data, page_length);
774                 if (ret)
775                         goto fail_put_pages;
776
777                 remain -= page_length;
778                 user_data += page_length;
779                 offset += page_length;
780         }
781
782 fail_put_pages:
783         i915_gem_object_put_pages(obj);
784 fail_unlock:
785         mutex_unlock(&dev->struct_mutex);
786
787         return ret;
788 }
789
790 /**
791  * This is the fallback shmem pwrite path, which uses get_user_pages to pin
792  * the user pages and maps them with kmap for copying.
793  *
794  * This avoids taking mmap_sem for faulting on the user's address while the
795  * struct_mutex is held.
796  */
797 static int
798 i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
799                            struct drm_i915_gem_pwrite *args,
800                            struct drm_file *file_priv)
801 {
802         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
803         struct mm_struct *mm = current->mm;
804         struct page **user_pages;
805         ssize_t remain;
806         loff_t offset, pinned_pages, i;
807         loff_t first_data_page, last_data_page, num_pages;
808         int shmem_page_index, shmem_page_offset;
809         int data_page_index,  data_page_offset;
810         int page_length;
811         int ret;
812         uint64_t data_ptr = args->data_ptr;
813         int do_bit17_swizzling;
814
815         remain = args->size;
816
817         /* Pin the user pages containing the data.  We can't fault while
818          * holding the struct mutex, and all of the pwrite implementations
819          * want to hold it while dereferencing the user data.
820          */
821         first_data_page = data_ptr / PAGE_SIZE;
822         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
823         num_pages = last_data_page - first_data_page + 1;
824
825         user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
826         if (user_pages == NULL)
827                 return -ENOMEM;
828
829         down_read(&mm->mmap_sem);
830         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
831                                       num_pages, 0, 0, user_pages, NULL);
832         up_read(&mm->mmap_sem);
833         if (pinned_pages < num_pages) {
834                 ret = -EFAULT;
835                 goto fail_put_user_pages;
836         }
837
838         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
839
840         mutex_lock(&dev->struct_mutex);
841
842         ret = i915_gem_object_get_pages_or_evict(obj);
843         if (ret)
844                 goto fail_unlock;
845
846         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
847         if (ret != 0)
848                 goto fail_put_pages;
849
850         obj_priv = to_intel_bo(obj);
851         offset = args->offset;
852         obj_priv->dirty = 1;
853
854         while (remain > 0) {
855                 /* Operation in this page
856                  *
857                  * shmem_page_index = page number within shmem file
858                  * shmem_page_offset = offset within page in shmem file
859                  * data_page_index = page number in get_user_pages return
860                  * data_page_offset = offset within data_page_index page.
861                  * page_length = bytes to copy for this page
862                  */
863                 shmem_page_index = offset / PAGE_SIZE;
864                 shmem_page_offset = offset & ~PAGE_MASK;
865                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
866                 data_page_offset = data_ptr & ~PAGE_MASK;
867
868                 page_length = remain;
869                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
870                         page_length = PAGE_SIZE - shmem_page_offset;
871                 if ((data_page_offset + page_length) > PAGE_SIZE)
872                         page_length = PAGE_SIZE - data_page_offset;
873
874                 if (do_bit17_swizzling) {
875                         slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
876                                               shmem_page_offset,
877                                               user_pages[data_page_index],
878                                               data_page_offset,
879                                               page_length,
880                                               0);
881                 } else {
882                         slow_shmem_copy(obj_priv->pages[shmem_page_index],
883                                         shmem_page_offset,
884                                         user_pages[data_page_index],
885                                         data_page_offset,
886                                         page_length);
887                 }
888
889                 remain -= page_length;
890                 data_ptr += page_length;
891                 offset += page_length;
892         }
893
894 fail_put_pages:
895         i915_gem_object_put_pages(obj);
896 fail_unlock:
897         mutex_unlock(&dev->struct_mutex);
898 fail_put_user_pages:
899         for (i = 0; i < pinned_pages; i++)
900                 page_cache_release(user_pages[i]);
901         drm_free_large(user_pages);
902
903         return ret;
904 }
905
906 /**
907  * Writes data to the object referenced by handle.
908  *
909  * On error, the contents of the buffer that were to be modified are undefined.
910  */
911 int
912 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
913                       struct drm_file *file_priv)
914 {
915         struct drm_i915_gem_pwrite *args = data;
916         struct drm_gem_object *obj;
917         struct drm_i915_gem_object *obj_priv;
918         int ret = 0;
919
920         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
921         if (obj == NULL)
922                 return -EBADF;
923         obj_priv = to_intel_bo(obj);
924
925         /* Bounds check destination.
926          *
927          * XXX: This could use review for overflow issues...
928          */
929         if (args->offset > obj->size || args->size > obj->size ||
930             args->offset + args->size > obj->size) {
931                 drm_gem_object_unreference_unlocked(obj);
932                 return -EINVAL;
933         }
934
935         /* We can only do the GTT pwrite on untiled buffers, as otherwise
936          * it would end up going through the fenced access, and we'll get
937          * different detiling behavior between reading and writing.
938          * pread/pwrite currently are reading and writing from the CPU
939          * perspective, requiring manual detiling by the client.
940          */
941         if (obj_priv->phys_obj)
942                 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
943         else if (obj_priv->tiling_mode == I915_TILING_NONE &&
944                  dev->gtt_total != 0 &&
945                  obj->write_domain != I915_GEM_DOMAIN_CPU) {
946                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
947                 if (ret == -EFAULT) {
948                         ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
949                                                        file_priv);
950                 }
951         } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
952                 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
953         } else {
954                 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
955                 if (ret == -EFAULT) {
956                         ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
957                                                          file_priv);
958                 }
959         }
960
961 #if WATCH_PWRITE
962         if (ret)
963                 DRM_INFO("pwrite failed %d\n", ret);
964 #endif
965
966         drm_gem_object_unreference_unlocked(obj);
967
968         return ret;
969 }
970
971 /**
972  * Called when user space prepares to use an object with the CPU, either
973  * through the mmap ioctl's mapping or a GTT mapping.
974  */
975 int
976 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
977                           struct drm_file *file_priv)
978 {
979         struct drm_i915_private *dev_priv = dev->dev_private;
980         struct drm_i915_gem_set_domain *args = data;
981         struct drm_gem_object *obj;
982         struct drm_i915_gem_object *obj_priv;
983         uint32_t read_domains = args->read_domains;
984         uint32_t write_domain = args->write_domain;
985         int ret;
986
987         if (!(dev->driver->driver_features & DRIVER_GEM))
988                 return -ENODEV;
989
990         /* Only handle setting domains to types used by the CPU. */
991         if (write_domain & I915_GEM_GPU_DOMAINS)
992                 return -EINVAL;
993
994         if (read_domains & I915_GEM_GPU_DOMAINS)
995                 return -EINVAL;
996
997         /* Having something in the write domain implies it's in the read
998          * domain, and only that read domain.  Enforce that in the request.
999          */
1000         if (write_domain != 0 && read_domains != write_domain)
1001                 return -EINVAL;
1002
1003         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1004         if (obj == NULL)
1005                 return -EBADF;
1006         obj_priv = to_intel_bo(obj);
1007
1008         mutex_lock(&dev->struct_mutex);
1009
1010         intel_mark_busy(dev, obj);
1011
1012 #if WATCH_BUF
1013         DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
1014                  obj, obj->size, read_domains, write_domain);
1015 #endif
1016         if (read_domains & I915_GEM_DOMAIN_GTT) {
1017                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1018
1019                 /* Update the LRU on the fence for the CPU access that's
1020                  * about to occur.
1021                  */
1022                 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1023                         struct drm_i915_fence_reg *reg =
1024                                 &dev_priv->fence_regs[obj_priv->fence_reg];
1025                         list_move_tail(&reg->lru_list,
1026                                        &dev_priv->mm.fence_list);
1027                 }
1028
1029                 /* Silently promote "you're not bound, there was nothing to do"
1030                  * to success, since the client was just asking us to
1031                  * make sure everything was done.
1032                  */
1033                 if (ret == -EINVAL)
1034                         ret = 0;
1035         } else {
1036                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1037         }
1038
1039         drm_gem_object_unreference(obj);
1040         mutex_unlock(&dev->struct_mutex);
1041         return ret;
1042 }
1043
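/*
 * Illustrative userspace sketch (assumed helper, real UAPI): before touching
 * an object through a CPU mmap, clients use this ioctl to move it to the CPU
 * domain.  When writing, read_domains must equal write_domain, as enforced
 * above.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int example_set_to_cpu_domain(int fd, uint32_t handle, int writing)
{
	struct drm_i915_gem_set_domain sd;

	memset(&sd, 0, sizeof(sd));
	sd.handle = handle;
	sd.read_domains = I915_GEM_DOMAIN_CPU;
	sd.write_domain = writing ? I915_GEM_DOMAIN_CPU : 0;

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) ? -errno : 0;
}
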
1044 /**
1045  * Called when user space has done writes to this buffer
1046  */
1047 int
1048 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1049                       struct drm_file *file_priv)
1050 {
1051         struct drm_i915_gem_sw_finish *args = data;
1052         struct drm_gem_object *obj;
1053         struct drm_i915_gem_object *obj_priv;
1054         int ret = 0;
1055
1056         if (!(dev->driver->driver_features & DRIVER_GEM))
1057                 return -ENODEV;
1058
1059         mutex_lock(&dev->struct_mutex);
1060         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1061         if (obj == NULL) {
1062                 mutex_unlock(&dev->struct_mutex);
1063                 return -EBADF;
1064         }
1065
1066 #if WATCH_BUF
1067         DRM_INFO("%s: sw_finish %d (%p %zd)\n",
1068                  __func__, args->handle, obj, obj->size);
1069 #endif
1070         obj_priv = to_intel_bo(obj);
1071
1072         /* Pinned buffers may be scanout, so flush the cache */
1073         if (obj_priv->pin_count)
1074                 i915_gem_object_flush_cpu_write_domain(obj);
1075
1076         drm_gem_object_unreference(obj);
1077         mutex_unlock(&dev->struct_mutex);
1078         return ret;
1079 }
1080
1081 /**
1082  * Maps the contents of an object, returning the address it is mapped
1083  * into.
1084  *
1085  * While the mapping holds a reference on the contents of the object, it doesn't
1086  * imply a ref on the object itself.
1087  */
1088 int
1089 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1090                    struct drm_file *file_priv)
1091 {
1092         struct drm_i915_gem_mmap *args = data;
1093         struct drm_gem_object *obj;
1094         loff_t offset;
1095         unsigned long addr;
1096
1097         if (!(dev->driver->driver_features & DRIVER_GEM))
1098                 return -ENODEV;
1099
1100         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1101         if (obj == NULL)
1102                 return -EBADF;
1103
1104         offset = args->offset;
1105
1106         down_write(&current->mm->mmap_sem);
1107         addr = do_mmap(obj->filp, 0, args->size,
1108                        PROT_READ | PROT_WRITE, MAP_SHARED,
1109                        args->offset);
1110         up_write(&current->mm->mmap_sem);
1111         drm_gem_object_unreference_unlocked(obj);
1112         if (IS_ERR((void *)addr))
1113                 return addr;
1114
1115         args->addr_ptr = (uint64_t) addr;
1116
1117         return 0;
1118 }
1119
1120 /**
1121  * i915_gem_fault - fault a page into the GTT
1122  * @vma: VMA in question
1123  * @vmf: fault info
1124  *
1125  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1126  * from userspace.  The fault handler takes care of binding the object to
1127  * the GTT (if needed), allocating and programming a fence register (again,
1128  * only if needed based on whether the old reg is still valid or the object
1129  * is tiled) and inserting a new PTE into the faulting process.
1130  *
1131  * Note that the faulting process may involve evicting existing objects
1132  * from the GTT and/or fence registers to make room.  So performance may
1133  * suffer if the GTT working set is large or there are few fence registers
1134  * left.
1135  */
1136 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1137 {
1138         struct drm_gem_object *obj = vma->vm_private_data;
1139         struct drm_device *dev = obj->dev;
1140         struct drm_i915_private *dev_priv = dev->dev_private;
1141         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1142         pgoff_t page_offset;
1143         unsigned long pfn;
1144         int ret = 0;
1145         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1146
1147         /* We don't use vmf->pgoff since that has the fake offset */
1148         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1149                 PAGE_SHIFT;
1150
1151         /* Now bind it into the GTT if needed */
1152         mutex_lock(&dev->struct_mutex);
1153         if (!obj_priv->gtt_space) {
1154                 ret = i915_gem_object_bind_to_gtt(obj, 0);
1155                 if (ret)
1156                         goto unlock;
1157
1158                 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1159
1160                 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1161                 if (ret)
1162                         goto unlock;
1163         }
1164
1165         /* Need a new fence register? */
1166         if (obj_priv->tiling_mode != I915_TILING_NONE) {
1167                 ret = i915_gem_object_get_fence_reg(obj);
1168                 if (ret)
1169                         goto unlock;
1170         }
1171
1172         pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1173                 page_offset;
1174
1175         /* Finally, remap it using the new GTT offset */
1176         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1177 unlock:
1178         mutex_unlock(&dev->struct_mutex);
1179
1180         switch (ret) {
1181         case 0:
1182         case -ERESTARTSYS:
1183                 return VM_FAULT_NOPAGE;
1184         case -ENOMEM:
1185         case -EAGAIN:
1186                 return VM_FAULT_OOM;
1187         default:
1188                 return VM_FAULT_SIGBUS;
1189         }
1190 }
1191
1192 /**
1193  * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1194  * @obj: obj in question
1195  *
1196  * GEM memory mapping works by handing back to userspace a fake mmap offset
1197  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
1198  * up the object based on the offset and sets up the various memory mapping
1199  * structures.
1200  *
1201  * This routine allocates and attaches a fake offset for @obj.
1202  */
1203 static int
1204 i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1205 {
1206         struct drm_device *dev = obj->dev;
1207         struct drm_gem_mm *mm = dev->mm_private;
1208         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1209         struct drm_map_list *list;
1210         struct drm_local_map *map;
1211         int ret = 0;
1212
1213         /* Set the object up for mmap'ing */
1214         list = &obj->map_list;
1215         list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
1216         if (!list->map)
1217                 return -ENOMEM;
1218
1219         map = list->map;
1220         map->type = _DRM_GEM;
1221         map->size = obj->size;
1222         map->handle = obj;
1223
1224         /* Get a DRM GEM mmap offset allocated... */
1225         list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1226                                                     obj->size / PAGE_SIZE, 0, 0);
1227         if (!list->file_offset_node) {
1228                 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
1229                 ret = -ENOMEM;
1230                 goto out_free_list;
1231         }
1232
1233         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1234                                                   obj->size / PAGE_SIZE, 0);
1235         if (!list->file_offset_node) {
1236                 ret = -ENOMEM;
1237                 goto out_free_list;
1238         }
1239
1240         list->hash.key = list->file_offset_node->start;
1241         if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
1242                 DRM_ERROR("failed to add to map hash\n");
1243                 ret = -ENOMEM;
1244                 goto out_free_mm;
1245         }
1246
1247         /* By now we should be all set, any drm_mmap request on the offset
1248          * below will get to our mmap & fault handler */
1249         obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
1250
1251         return 0;
1252
1253 out_free_mm:
1254         drm_mm_put_block(list->file_offset_node);
1255 out_free_list:
1256         kfree(list->map);
1257
1258         return ret;
1259 }
1260
1261 /**
1262  * i915_gem_release_mmap - remove physical page mappings
1263  * @obj: obj in question
1264  *
1265  * Preserve the reservation of the mmapping with the DRM core code, but
1266  * relinquish ownership of the pages back to the system.
1267  *
1268  * It is vital that we remove the page mapping if we have mapped a tiled
1269  * object through the GTT and then lose the fence register due to
1270  * resource pressure. Similarly if the object has been moved out of the
1271  * aperture, then pages mapped into userspace must be revoked. Removing the
1272  * mapping will then trigger a page fault on the next user access, allowing
1273  * fixup by i915_gem_fault().
1274  */
1275 void
1276 i915_gem_release_mmap(struct drm_gem_object *obj)
1277 {
1278         struct drm_device *dev = obj->dev;
1279         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1280
1281         if (dev->dev_mapping)
1282                 unmap_mapping_range(dev->dev_mapping,
1283                                     obj_priv->mmap_offset, obj->size, 1);
1284 }
1285
1286 static void
1287 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1288 {
1289         struct drm_device *dev = obj->dev;
1290         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1291         struct drm_gem_mm *mm = dev->mm_private;
1292         struct drm_map_list *list;
1293
1294         list = &obj->map_list;
1295         drm_ht_remove_item(&mm->offset_hash, &list->hash);
1296
1297         if (list->file_offset_node) {
1298                 drm_mm_put_block(list->file_offset_node);
1299                 list->file_offset_node = NULL;
1300         }
1301
1302         if (list->map) {
1303                 kfree(list->map);
1304                 list->map = NULL;
1305         }
1306
1307         obj_priv->mmap_offset = 0;
1308 }
1309
1310 /**
1311  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1312  * @obj: object to check
1313  *
1314  * Return the required GTT alignment for an object, taking into account
1315  * potential fence register mapping if needed.
1316  */
1317 static uint32_t
1318 i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1319 {
1320         struct drm_device *dev = obj->dev;
1321         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1322         int start, i;
1323
1324         /*
1325          * Minimum alignment is 4k (GTT page size), but might be greater
1326          * if a fence register is needed for the object.
1327          */
1328         if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
1329                 return 4096;
1330
1331         /*
1332          * Previous chips need to be aligned to the size of the smallest
1333          * fence register that can contain the object.
1334          */
1335         if (IS_I9XX(dev))
1336                 start = 1024*1024;
1337         else
1338                 start = 512*1024;
1339
1340         for (i = start; i < obj->size; i <<= 1)
1341                 ;
1342
1343         return i;
1344 }
1345
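/*
 * Worked example for the loop above: a 200KB tiled object on a pre-965 9xx
 * chip starts the scan at 1MB, which already covers the object, so the
 * alignment is 1MB; a 3MB object walks 1MB -> 2MB -> 4MB and gets 4MB.  In
 * other words, the alignment is the smallest power-of-two fence size that
 * contains the object, with a 1MB (9xx) or 512KB (8xx) floor.
 */
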
1346 /**
1347  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1348  * @dev: DRM device
1349  * @data: GTT mapping ioctl data
1350  * @file_priv: GEM object info
1351  *
1352  * Simply returns the fake offset to userspace so it can mmap it.
1353  * The mmap call will end up in drm_gem_mmap(), which will set things
1354  * up so we can get faults in the handler above.
1355  *
1356  * The fault handler will take care of binding the object into the GTT
1357  * (since it may have been evicted to make room for something), allocating
1358  * a fence register, and mapping the appropriate aperture address into
1359  * userspace.
1360  */
1361 int
1362 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1363                         struct drm_file *file_priv)
1364 {
1365         struct drm_i915_gem_mmap_gtt *args = data;
1366         struct drm_i915_private *dev_priv = dev->dev_private;
1367         struct drm_gem_object *obj;
1368         struct drm_i915_gem_object *obj_priv;
1369         int ret;
1370
1371         if (!(dev->driver->driver_features & DRIVER_GEM))
1372                 return -ENODEV;
1373
1374         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1375         if (obj == NULL)
1376                 return -EBADF;
1377
1378         mutex_lock(&dev->struct_mutex);
1379
1380         obj_priv = to_intel_bo(obj);
1381
1382         if (obj_priv->madv != I915_MADV_WILLNEED) {
1383                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1384                 drm_gem_object_unreference(obj);
1385                 mutex_unlock(&dev->struct_mutex);
1386                 return -EINVAL;
1387         }
1388
1389
1390         if (!obj_priv->mmap_offset) {
1391                 ret = i915_gem_create_mmap_offset(obj);
1392                 if (ret) {
1393                         drm_gem_object_unreference(obj);
1394                         mutex_unlock(&dev->struct_mutex);
1395                         return ret;
1396                 }
1397         }
1398
1399         args->offset = obj_priv->mmap_offset;
1400
1401         /*
1402          * Pull it into the GTT so that we have a page list (makes the
1403          * initial fault faster and any subsequent flushing possible).
1404          */
1405         if (!obj_priv->agp_mem) {
1406                 ret = i915_gem_object_bind_to_gtt(obj, 0);
1407                 if (ret) {
1408                         drm_gem_object_unreference(obj);
1409                         mutex_unlock(&dev->struct_mutex);
1410                         return ret;
1411                 }
1412                 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1413         }
1414
1415         drm_gem_object_unreference(obj);
1416         mutex_unlock(&dev->struct_mutex);
1417
1418         return 0;
1419 }
1420
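/*
 * Illustrative userspace sketch (assumed helper, real UAPI): the ioctl above
 * only hands back the fake offset; the client then calls plain mmap() on the
 * DRM fd with that offset, and the pages are faulted in through
 * i915_gem_fault().  Contrast with the mmap ioctl, which maps the shmem
 * backing pages directly and returns the address itself.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <i915_drm.h>

static void *example_gem_mmap_gtt(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt mmap_arg;

	memset(&mmap_arg, 0, sizeof(mmap_arg));
	mmap_arg.handle = handle;
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg))
		return MAP_FAILED;

	/* Writes through this mapping go through the GTT aperture. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, mmap_arg.offset);
}
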
1421 void
1422 i915_gem_object_put_pages(struct drm_gem_object *obj)
1423 {
1424         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1425         int page_count = obj->size / PAGE_SIZE;
1426         int i;
1427
1428         BUG_ON(obj_priv->pages_refcount == 0);
1429         BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
1430
1431         if (--obj_priv->pages_refcount != 0)
1432                 return;
1433
1434         if (obj_priv->tiling_mode != I915_TILING_NONE)
1435                 i915_gem_object_save_bit_17_swizzle(obj);
1436
1437         if (obj_priv->madv == I915_MADV_DONTNEED)
1438                 obj_priv->dirty = 0;
1439
1440         for (i = 0; i < page_count; i++) {
1441                 if (obj_priv->dirty)
1442                         set_page_dirty(obj_priv->pages[i]);
1443
1444                 if (obj_priv->madv == I915_MADV_WILLNEED)
1445                         mark_page_accessed(obj_priv->pages[i]);
1446
1447                 page_cache_release(obj_priv->pages[i]);
1448         }
1449         obj_priv->dirty = 0;
1450
1451         drm_free_large(obj_priv->pages);
1452         obj_priv->pages = NULL;
1453 }
1454
1455 static void
1456 i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
1457                                struct intel_ring_buffer *ring)
1458 {
1459         struct drm_device *dev = obj->dev;
1460         drm_i915_private_t *dev_priv = dev->dev_private;
1461         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1462         BUG_ON(ring == NULL);
1463         obj_priv->ring = ring;
1464
1465         /* Add a reference if we're newly entering the active list. */
1466         if (!obj_priv->active) {
1467                 drm_gem_object_reference(obj);
1468                 obj_priv->active = 1;
1469         }
1470         /* Move from whatever list we were on to the tail of execution. */
1471         spin_lock(&dev_priv->mm.active_list_lock);
1472         list_move_tail(&obj_priv->list, &ring->active_list);
1473         spin_unlock(&dev_priv->mm.active_list_lock);
1474         obj_priv->last_rendering_seqno = seqno;
1475 }
1476
1477 static void
1478 i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1479 {
1480         struct drm_device *dev = obj->dev;
1481         drm_i915_private_t *dev_priv = dev->dev_private;
1482         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1483
1484         BUG_ON(!obj_priv->active);
1485         list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1486         obj_priv->last_rendering_seqno = 0;
1487 }
1488
1489 /* Immediately discard the backing storage */
1490 static void
1491 i915_gem_object_truncate(struct drm_gem_object *obj)
1492 {
1493         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1494         struct inode *inode;
1495
1496         inode = obj->filp->f_path.dentry->d_inode;
1497         if (inode->i_op->truncate)
1498                 inode->i_op->truncate(inode);
1499
1500         obj_priv->madv = __I915_MADV_PURGED;
1501 }
1502
1503 static inline int
1504 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
1505 {
1506         return obj_priv->madv == I915_MADV_DONTNEED;
1507 }
1508
1509 static void
1510 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1511 {
1512         struct drm_device *dev = obj->dev;
1513         drm_i915_private_t *dev_priv = dev->dev_private;
1514         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1515
1516         i915_verify_inactive(dev, __FILE__, __LINE__);
1517         if (obj_priv->pin_count != 0)
1518                 list_del_init(&obj_priv->list);
1519         else
1520                 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1521
1522         BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1523
1524         obj_priv->last_rendering_seqno = 0;
1525         obj_priv->ring = NULL;
1526         if (obj_priv->active) {
1527                 obj_priv->active = 0;
1528                 drm_gem_object_unreference(obj);
1529         }
1530         i915_verify_inactive(dev, __FILE__, __LINE__);
1531 }
1532
1533 static void
1534 i915_gem_process_flushing_list(struct drm_device *dev,
1535                                uint32_t flush_domains, uint32_t seqno,
1536                                struct intel_ring_buffer *ring)
1537 {
1538         drm_i915_private_t *dev_priv = dev->dev_private;
1539         struct drm_i915_gem_object *obj_priv, *next;
1540
1541         list_for_each_entry_safe(obj_priv, next,
1542                                  &dev_priv->mm.gpu_write_list,
1543                                  gpu_write_list) {
1544                 struct drm_gem_object *obj = &obj_priv->base;
1545
1546                 if ((obj->write_domain & flush_domains) ==
1547                     obj->write_domain &&
1548                     obj_priv->ring->ring_flag == ring->ring_flag) {
1549                         uint32_t old_write_domain = obj->write_domain;
1550
1551                         obj->write_domain = 0;
1552                         list_del_init(&obj_priv->gpu_write_list);
1553                         i915_gem_object_move_to_active(obj, seqno, ring);
1554
1555                         /* update the fence lru list */
1556                         if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1557                                 struct drm_i915_fence_reg *reg =
1558                                         &dev_priv->fence_regs[obj_priv->fence_reg];
1559                                 list_move_tail(&reg->lru_list,
1560                                                 &dev_priv->mm.fence_list);
1561                         }
1562
1563                         trace_i915_gem_object_change_domain(obj,
1564                                                             obj->read_domains,
1565                                                             old_write_domain);
1566                 }
1567         }
1568 }
1569
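/*
 * Rough usage sketch (illustrative, mirroring i915_gpu_idle() further down):
 * a caller that wants its flush to be waitable emits the flush, records a
 * request and then waits on the returned seqno:
 *
 *        i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 *        seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, ring);
 *        if (seqno == 0)
 *                return -ENOMEM;
 *        ret = i915_wait_request(dev, seqno, ring);
 *
 * A zero return from i915_add_request() means the request allocation failed.
 */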
1570 uint32_t
1571 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1572                  uint32_t flush_domains, struct intel_ring_buffer *ring)
1573 {
1574         drm_i915_private_t *dev_priv = dev->dev_private;
1575         struct drm_i915_file_private *i915_file_priv = NULL;
1576         struct drm_i915_gem_request *request;
1577         uint32_t seqno;
1578         int was_empty;
1579
1580         if (file_priv != NULL)
1581                 i915_file_priv = file_priv->driver_priv;
1582
1583         request = kzalloc(sizeof(*request), GFP_KERNEL);
1584         if (request == NULL)
1585                 return 0;
1586
1587         seqno = ring->add_request(dev, ring, file_priv, flush_domains);
1588
1589         request->seqno = seqno;
1590         request->ring = ring;
1591         request->emitted_jiffies = jiffies;
1592         was_empty = list_empty(&ring->request_list);
1593         list_add_tail(&request->list, &ring->request_list);
1594
1595         if (i915_file_priv) {
1596                 list_add_tail(&request->client_list,
1597                               &i915_file_priv->mm.request_list);
1598         } else {
1599                 INIT_LIST_HEAD(&request->client_list);
1600         }
1601
1602         /* Associate any objects on the flushing list matching the write
1603          * domain we're flushing with our flush.
1604          */
1605         if (flush_domains != 0)
1606                 i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
1607
1608         if (!dev_priv->mm.suspended) {
1609                 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
1610                 if (was_empty)
1611                         queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1612         }
1613         return seqno;
1614 }
1615
1616 /**
1617  * Command execution barrier
1618  *
1619  * Ensures that all commands in the ring are finished
1620  * before signalling the CPU
1621  */
1622 static uint32_t
1623 i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
1624 {
1625         uint32_t flush_domains = 0;
1626
1627         /* The sampler always gets flushed on i965 (sigh) */
1628         if (IS_I965G(dev))
1629                 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1630
1631         ring->flush(dev, ring,
1632                         I915_GEM_DOMAIN_COMMAND, flush_domains);
1633         return flush_domains;
1634 }
1635
1636 /**
1637  * Moves buffers associated only with the given active seqno from the active
1638  * to inactive list, potentially freeing them.
1639  */
1640 static void
1641 i915_gem_retire_request(struct drm_device *dev,
1642                         struct drm_i915_gem_request *request)
1643 {
1644         drm_i915_private_t *dev_priv = dev->dev_private;
1645
1646         trace_i915_gem_request_retire(dev, request->seqno);
1647
1648         /* Move any buffers on the active list that are no longer referenced
1649          * by the ringbuffer to the flushing/inactive lists as appropriate.
1650          */
1651         spin_lock(&dev_priv->mm.active_list_lock);
1652         while (!list_empty(&request->ring->active_list)) {
1653                 struct drm_gem_object *obj;
1654                 struct drm_i915_gem_object *obj_priv;
1655
1656                 obj_priv = list_first_entry(&request->ring->active_list,
1657                                             struct drm_i915_gem_object,
1658                                             list);
1659                 obj = &obj_priv->base;
1660
1661                 /* If the seqno being retired doesn't match the oldest in the
1662                  * list, then the oldest in the list must still be newer than
1663                  * this seqno.
1664                  */
1665                 if (obj_priv->last_rendering_seqno != request->seqno)
1666                         goto out;
1667
1668 #if WATCH_LRU
1669                 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1670                          __func__, request->seqno, obj);
1671 #endif
1672
1673                 if (obj->write_domain != 0)
1674                         i915_gem_object_move_to_flushing(obj);
1675                 else {
1676                         /* Take a reference on the object so it won't be
1677                          * freed while the spinlock is held.  The list
1678                          * protection for this spinlock is safe when breaking
1679                          * the lock like this since the next thing we do
1680                          * is just get the head of the list again.
1681                          */
1682                         drm_gem_object_reference(obj);
1683                         i915_gem_object_move_to_inactive(obj);
1684                         spin_unlock(&dev_priv->mm.active_list_lock);
1685                         drm_gem_object_unreference(obj);
1686                         spin_lock(&dev_priv->mm.active_list_lock);
1687                 }
1688         }
1689 out:
1690         spin_unlock(&dev_priv->mm.active_list_lock);
1691 }
1692
1693 /**
1694  * Returns true if seq1 is later than seq2.
1695  */
1696 bool
1697 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1698 {
1699         return (int32_t)(seq1 - seq2) >= 0;
1700 }
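/*
 * Worked example: the signed subtraction keeps the comparison correct across
 * 32-bit seqno wrap-around:
 *
 *        i915_seqno_passed(5, 3)          -> (int32_t)2  >= 0 -> true
 *        i915_seqno_passed(3, 5)          -> (int32_t)-2 >= 0 -> false
 *        i915_seqno_passed(2, 0xfffffffe) -> (int32_t)4  >= 0 -> true
 *
 * so a seqno issued just after the counter wraps still compares as "later"
 * than one issued just before the wrap.
 */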
1701
1702 uint32_t
1703 i915_get_gem_seqno(struct drm_device *dev,
1704                    struct intel_ring_buffer *ring)
1705 {
1706         return ring->get_gem_seqno(dev, ring);
1707 }
1708
1709 /**
1710  * This function clears the request list as sequence numbers are passed.
1711  */
1712 void
1713 i915_gem_retire_requests(struct drm_device *dev,
1714                 struct intel_ring_buffer *ring)
1715 {
1716         drm_i915_private_t *dev_priv = dev->dev_private;
1717         uint32_t seqno;
1718
1719         if (!ring->status_page.page_addr
1720                         || list_empty(&ring->request_list))
1721                 return;
1722
1723         seqno = i915_get_gem_seqno(dev, ring);
1724
1725         while (!list_empty(&ring->request_list)) {
1726                 struct drm_i915_gem_request *request;
1727                 uint32_t retiring_seqno;
1728
1729                 request = list_first_entry(&ring->request_list,
1730                                            struct drm_i915_gem_request,
1731                                            list);
1732                 retiring_seqno = request->seqno;
1733
1734                 if (i915_seqno_passed(seqno, retiring_seqno) ||
1735                     atomic_read(&dev_priv->mm.wedged)) {
1736                         i915_gem_retire_request(dev, request);
1737
1738                         list_del(&request->list);
1739                         list_del(&request->client_list);
1740                         kfree(request);
1741                 } else
1742                         break;
1743         }
1744
1745         if (unlikely(dev_priv->trace_irq_seqno &&
1746                       i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
1747
1748                 ring->user_irq_put(dev, ring);
1749                 dev_priv->trace_irq_seqno = 0;
1750         }
1751 }
1752
1753 void
1754 i915_gem_retire_work_handler(struct work_struct *work)
1755 {
1756         drm_i915_private_t *dev_priv;
1757         struct drm_device *dev;
1758
1759         dev_priv = container_of(work, drm_i915_private_t,
1760                                 mm.retire_work.work);
1761         dev = dev_priv->dev;
1762
1763         mutex_lock(&dev->struct_mutex);
1764         i915_gem_retire_requests(dev, &dev_priv->render_ring);
1765
1766         if (HAS_BSD(dev))
1767                 i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
1768
1769         if (!dev_priv->mm.suspended &&
1770                 (!list_empty(&dev_priv->render_ring.request_list) ||
1771                         (HAS_BSD(dev) &&
1772                          !list_empty(&dev_priv->bsd_ring.request_list))))
1773                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1774         mutex_unlock(&dev->struct_mutex);
1775 }
1776
1777 int
1778 i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
1779                 int interruptible, struct intel_ring_buffer *ring)
1780 {
1781         drm_i915_private_t *dev_priv = dev->dev_private;
1782         u32 ier;
1783         int ret = 0;
1784
1785         BUG_ON(seqno == 0);
1786
1787         if (atomic_read(&dev_priv->mm.wedged))
1788                 return -EIO;
1789
1790         if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
1791                 if (HAS_PCH_SPLIT(dev))
1792                         ier = I915_READ(DEIER) | I915_READ(GTIER);
1793                 else
1794                         ier = I915_READ(IER);
1795                 if (!ier) {
1796                         DRM_ERROR("something (likely vbetool) disabled "
1797                                   "interrupts, re-enabling\n");
1798                         i915_driver_irq_preinstall(dev);
1799                         i915_driver_irq_postinstall(dev);
1800                 }
1801
1802                 trace_i915_gem_request_wait_begin(dev, seqno);
1803
1804                 ring->waiting_gem_seqno = seqno;
1805                 ring->user_irq_get(dev, ring);
1806                 if (interruptible)
1807                         ret = wait_event_interruptible(ring->irq_queue,
1808                                 i915_seqno_passed(
1809                                         ring->get_gem_seqno(dev, ring), seqno)
1810                                 || atomic_read(&dev_priv->mm.wedged));
1811                 else
1812                         wait_event(ring->irq_queue,
1813                                 i915_seqno_passed(
1814                                         ring->get_gem_seqno(dev, ring), seqno)
1815                                 || atomic_read(&dev_priv->mm.wedged));
1816
1817                 ring->user_irq_put(dev, ring);
1818                 ring->waiting_gem_seqno = 0;
1819
1820                 trace_i915_gem_request_wait_end(dev, seqno);
1821         }
1822         if (atomic_read(&dev_priv->mm.wedged))
1823                 ret = -EIO;
1824
1825         if (ret && ret != -ERESTARTSYS)
1826                 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1827                           __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
1828
1829         /* Directly dispatch request retiring.  While we have the work queue
1830          * to handle this, the waiter on a request often wants an associated
1831          * buffer to have made it to the inactive list, and we would need
1832          * a separate wait queue to handle that.
1833          */
1834         if (ret == 0)
1835                 i915_gem_retire_requests(dev, ring);
1836
1837         return ret;
1838 }
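/*
 * Note (descriptive): i915_wait_request() below is the common interruptible
 * wrapper; the uninterruptible form (interruptible == 0) is used by
 * i915_gem_object_set_to_display_plane(), where -ERESTARTSYS must not leak
 * into the modesetting path.
 */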
1839
1840 /**
1841  * Waits for a sequence number to be signaled, and cleans up the
1842  * request and object lists appropriately for that event.
1843  */
1844 static int
1845 i915_wait_request(struct drm_device *dev, uint32_t seqno,
1846                 struct intel_ring_buffer *ring)
1847 {
1848         return i915_do_wait_request(dev, seqno, 1, ring);
1849 }
1850
1851 static void
1852 i915_gem_flush(struct drm_device *dev,
1853                uint32_t invalidate_domains,
1854                uint32_t flush_domains)
1855 {
1856         drm_i915_private_t *dev_priv = dev->dev_private;
1857         if (flush_domains & I915_GEM_DOMAIN_CPU)
1858                 drm_agp_chipset_flush(dev);
1859         dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
1860                         invalidate_domains,
1861                         flush_domains);
1862
1863         if (HAS_BSD(dev))
1864                 dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
1865                                 invalidate_domains,
1866                                 flush_domains);
1867 }
1868
1869 static void
1870 i915_gem_flush_ring(struct drm_device *dev,
1871                uint32_t invalidate_domains,
1872                uint32_t flush_domains,
1873                struct intel_ring_buffer *ring)
1874 {
1875         if (flush_domains & I915_GEM_DOMAIN_CPU)
1876                 drm_agp_chipset_flush(dev);
1877         ring->flush(dev, ring,
1878                         invalidate_domains,
1879                         flush_domains);
1880 }
1881
1882 /**
1883  * Ensures that all rendering to the object has completed and the object is
1884  * safe to unbind from the GTT or access from the CPU.
1885  */
1886 static int
1887 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1888 {
1889         struct drm_device *dev = obj->dev;
1890         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1891         int ret;
1892
1893         /* This function only exists to support waiting for existing rendering,
1894          * not for emitting required flushes.
1895          */
1896         BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
1897
1898         /* If there is rendering queued on the buffer being evicted, wait for
1899          * it.
1900          */
1901         if (obj_priv->active) {
1902 #if WATCH_BUF
1903                 DRM_INFO("%s: object %p wait for seqno %08x\n",
1904                           __func__, obj, obj_priv->last_rendering_seqno);
1905 #endif
1906                 ret = i915_wait_request(dev,
1907                                 obj_priv->last_rendering_seqno, obj_priv->ring);
1908                 if (ret != 0)
1909                         return ret;
1910         }
1911
1912         return 0;
1913 }
1914
1915 /**
1916  * Unbinds an object from the GTT aperture.
1917  */
1918 int
1919 i915_gem_object_unbind(struct drm_gem_object *obj)
1920 {
1921         struct drm_device *dev = obj->dev;
1922         drm_i915_private_t *dev_priv = dev->dev_private;
1923         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1924         int ret = 0;
1925
1926 #if WATCH_BUF
1927         DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1928         DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1929 #endif
1930         if (obj_priv->gtt_space == NULL)
1931                 return 0;
1932
1933         if (obj_priv->pin_count != 0) {
1934                 DRM_ERROR("Attempting to unbind pinned buffer\n");
1935                 return -EINVAL;
1936         }
1937
1938         /* blow away mappings if mapped through GTT */
1939         i915_gem_release_mmap(obj);
1940
1941         /* Move the object to the CPU domain to ensure that
1942          * any possible CPU writes while it's not in the GTT
1943          * are flushed when we go to remap it. This will
1944          * also ensure that all pending GPU writes are finished
1945          * before we unbind.
1946          */
1947         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1948         if (ret) {
1949                 if (ret != -ERESTARTSYS)
1950                         DRM_ERROR("set_domain failed: %d\n", ret);
1951                 return ret;
1952         }
1953
1954         BUG_ON(obj_priv->active);
1955
1956         /* release the fence reg _after_ flushing */
1957         if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1958                 i915_gem_clear_fence_reg(obj);
1959
1960         if (obj_priv->agp_mem != NULL) {
1961                 drm_unbind_agp(obj_priv->agp_mem);
1962                 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
1963                 obj_priv->agp_mem = NULL;
1964         }
1965
1966         i915_gem_object_put_pages(obj);
1967         BUG_ON(obj_priv->pages_refcount);
1968
1969         if (obj_priv->gtt_space) {
1970                 atomic_dec(&dev->gtt_count);
1971                 atomic_sub(obj->size, &dev->gtt_memory);
1972
1973                 drm_mm_put_block(obj_priv->gtt_space);
1974                 obj_priv->gtt_space = NULL;
1975         }
1976
1977         /* Remove ourselves from the LRU list if present. */
1978         spin_lock(&dev_priv->mm.active_list_lock);
1979         if (!list_empty(&obj_priv->list))
1980                 list_del_init(&obj_priv->list);
1981         spin_unlock(&dev_priv->mm.active_list_lock);
1982
1983         if (i915_gem_object_is_purgeable(obj_priv))
1984                 i915_gem_object_truncate(obj);
1985
1986         trace_i915_gem_object_unbind(obj);
1987
1988         return 0;
1989 }
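/*
 * Summary (descriptive): i915_gem_object_unbind() above tears things down in
 * dependency order: zap userspace GTT mappings, move the object to the CPU
 * domain (which also waits for the GPU), release the fence register, unbind
 * and free the AGP memory, drop the page references, and finally return the
 * GTT node to the allocator.
 */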
1990
1991 static struct drm_gem_object *
1992 i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
1993 {
1994         drm_i915_private_t *dev_priv = dev->dev_private;
1995         struct drm_i915_gem_object *obj_priv;
1996         struct drm_gem_object *best = NULL;
1997         struct drm_gem_object *first = NULL;
1998
1999         /* Try to find the smallest clean object */
2000         list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
2001                 struct drm_gem_object *obj = &obj_priv->base;
2002                 if (obj->size >= min_size) {
2003                         if ((!obj_priv->dirty ||
2004                              i915_gem_object_is_purgeable(obj_priv)) &&
2005                             (!best || obj->size < best->size)) {
2006                                 best = obj;
2007                                 if (best->size == min_size)
2008                                         return best;
2009                         }
2010                         if (!first)
2011                             first = obj;
2012                 }
2013         }
2014
2015         return best ? best : first;
2016 }
2017
2018 static int
2019 i915_gpu_idle(struct drm_device *dev)
2020 {
2021         drm_i915_private_t *dev_priv = dev->dev_private;
2022         bool lists_empty;
2023         uint32_t seqno1, seqno2;
2024         int ret;
2025
2026         spin_lock(&dev_priv->mm.active_list_lock);
2027         lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2028                        list_empty(&dev_priv->render_ring.active_list) &&
2029                        (!HAS_BSD(dev) ||
2030                         list_empty(&dev_priv->bsd_ring.active_list)));
2031         spin_unlock(&dev_priv->mm.active_list_lock);
2032
2033         if (lists_empty)
2034                 return 0;
2035
2036         /* Flush everything onto the inactive list. */
2037         i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2038         seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
2039                         &dev_priv->render_ring);
2040         if (seqno1 == 0)
2041                 return -ENOMEM;
2042         ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
2043
2044         if (HAS_BSD(dev)) {
2045                 seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
2046                                 &dev_priv->bsd_ring);
2047                 if (seqno2 == 0)
2048                         return -ENOMEM;
2049
2050                 ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
2051                 if (ret)
2052                         return ret;
2053         }
2054
2055
2056         return ret;
2057 }
2058
2059 static int
2060 i915_gem_evict_everything(struct drm_device *dev)
2061 {
2062         drm_i915_private_t *dev_priv = dev->dev_private;
2063         int ret;
2064         bool lists_empty;
2065
2066         spin_lock(&dev_priv->mm.active_list_lock);
2067         lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2068                        list_empty(&dev_priv->mm.flushing_list) &&
2069                        list_empty(&dev_priv->render_ring.active_list) &&
2070                        (!HAS_BSD(dev)
2071                         || list_empty(&dev_priv->bsd_ring.active_list)));
2072         spin_unlock(&dev_priv->mm.active_list_lock);
2073
2074         if (lists_empty)
2075                 return -ENOSPC;
2076
2077         /* Flush everything (on to the inactive lists) and evict */
2078         ret = i915_gpu_idle(dev);
2079         if (ret)
2080                 return ret;
2081
2082         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2083
2084         ret = i915_gem_evict_from_inactive_list(dev);
2085         if (ret)
2086                 return ret;
2087
2088         spin_lock(&dev_priv->mm.active_list_lock);
2089         lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2090                        list_empty(&dev_priv->mm.flushing_list) &&
2091                        list_empty(&dev_priv->render_ring.active_list) &&
2092                        (!HAS_BSD(dev)
2093                         || list_empty(&dev_priv->bsd_ring.active_list)));
2094         spin_unlock(&dev_priv->mm.active_list_lock);
2095         BUG_ON(!lists_empty);
2096
2097         return 0;
2098 }
2099
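/*
 * Summary (descriptive): i915_gem_evict_something() below escalates in order:
 *  1. retire finished requests and look for a large-enough inactive buffer;
 *  2. otherwise wait on the oldest outstanding request on either ring;
 *  3. otherwise flush a large-enough buffer off the flushing list;
 *  4. as a last resort, evict the whole inactive list or everything.
 * Each step loops back so newly retired buffers are reconsidered.
 */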
2100 static int
2101 i915_gem_evict_something(struct drm_device *dev, int min_size)
2102 {
2103         drm_i915_private_t *dev_priv = dev->dev_private;
2104         struct drm_gem_object *obj;
2105         int ret;
2106
2107         struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
2108         struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
2109         for (;;) {
2110                 i915_gem_retire_requests(dev, render_ring);
2111
2112                 if (HAS_BSD(dev))
2113                         i915_gem_retire_requests(dev, bsd_ring);
2114
2115                 /* If there's an inactive buffer available now, grab it
2116                  * and be done.
2117                  */
2118                 obj = i915_gem_find_inactive_object(dev, min_size);
2119                 if (obj) {
2120                         struct drm_i915_gem_object *obj_priv;
2121
2122 #if WATCH_LRU
2123                         DRM_INFO("%s: evicting %p\n", __func__, obj);
2124 #endif
2125                         obj_priv = to_intel_bo(obj);
2126                         BUG_ON(obj_priv->pin_count != 0);
2127                         BUG_ON(obj_priv->active);
2128
2129                         /* Wait on the rendering and unbind the buffer. */
2130                         return i915_gem_object_unbind(obj);
2131                 }
2132
2133                 /* If we didn't get anything, but the ring is still processing
2134                  * things, wait for the next to finish and hopefully leave us
2135                  * a buffer to evict.
2136                  */
2137                 if (!list_empty(&render_ring->request_list)) {
2138                         struct drm_i915_gem_request *request;
2139
2140                         request = list_first_entry(&render_ring->request_list,
2141                                                    struct drm_i915_gem_request,
2142                                                    list);
2143
2144                         ret = i915_wait_request(dev,
2145                                         request->seqno, request->ring);
2146                         if (ret)
2147                                 return ret;
2148
2149                         continue;
2150                 }
2151
2152                 if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
2153                         struct drm_i915_gem_request *request;
2154
2155                         request = list_first_entry(&bsd_ring->request_list,
2156                                                    struct drm_i915_gem_request,
2157                                                    list);
2158
2159                         ret = i915_wait_request(dev,
2160                                         request->seqno, request->ring);
2161                         if (ret)
2162                                 return ret;
2163
2164                         continue;
2165                 }
2166
2167                 /* If we didn't have anything on the request list but there
2168                  * are buffers awaiting a flush, emit one and try again.
2169                  * When we wait on it, those buffers waiting for that flush
2170                  * will get moved to inactive.
2171                  */
2172                 if (!list_empty(&dev_priv->mm.flushing_list)) {
2173                         struct drm_i915_gem_object *obj_priv;
2174
2175                         /* Find an object that we can immediately reuse */
2176                         list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
2177                                 obj = &obj_priv->base;
2178                                 if (obj->size >= min_size)
2179                                         break;
2180
2181                                 obj = NULL;
2182                         }
2183
2184                         if (obj != NULL) {
2185                                 uint32_t seqno;
2186
2187                                 i915_gem_flush_ring(dev,
2188                                                obj->write_domain,
2189                                                obj->write_domain,
2190                                                obj_priv->ring);
2191                                 seqno = i915_add_request(dev, NULL,
2192                                                 obj->write_domain,
2193                                                 obj_priv->ring);
2194                                 if (seqno == 0)
2195                                         return -ENOMEM;
2196                                 continue;
2197                         }
2198                 }
2199
2200                 /* If we didn't do any of the above, there's no single buffer
2201                  * large enough to swap out for the new one, so just evict
2202                  * everything and start again. (This should be rare.)
2203                  */
2204                 if (!list_empty(&dev_priv->mm.inactive_list))
2205                         return i915_gem_evict_from_inactive_list(dev);
2206                 else
2207                         return i915_gem_evict_everything(dev);
2208         }
2209 }
2210
2211 int
2212 i915_gem_object_get_pages(struct drm_gem_object *obj,
2213                           gfp_t gfpmask)
2214 {
2215         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2216         int page_count, i;
2217         struct address_space *mapping;
2218         struct inode *inode;
2219         struct page *page;
2220
2221         BUG_ON(obj_priv->pages_refcount
2222                         == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
2223
2224         if (obj_priv->pages_refcount++ != 0)
2225                 return 0;
2226
2227         /* Get the list of pages out of our struct file.  They'll be pinned
2228          * at this point until we release them.
2229          */
2230         page_count = obj->size / PAGE_SIZE;
2231         BUG_ON(obj_priv->pages != NULL);
2232         obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
2233         if (obj_priv->pages == NULL) {
2234                 obj_priv->pages_refcount--;
2235                 return -ENOMEM;
2236         }
2237
2238         inode = obj->filp->f_path.dentry->d_inode;
2239         mapping = inode->i_mapping;
2240         for (i = 0; i < page_count; i++) {
2241                 page = read_cache_page_gfp(mapping, i,
2242                                            GFP_HIGHUSER |
2243                                            __GFP_COLD |
2244                                            __GFP_RECLAIMABLE |
2245                                            gfpmask);
2246                 if (IS_ERR(page))
2247                         goto err_pages;
2248
2249                 obj_priv->pages[i] = page;
2250         }
2251
2252         if (obj_priv->tiling_mode != I915_TILING_NONE)
2253                 i915_gem_object_do_bit_17_swizzle(obj);
2254
2255         return 0;
2256
2257 err_pages:
2258         while (i--)
2259                 page_cache_release(obj_priv->pages[i]);
2260
2261         drm_free_large(obj_priv->pages);
2262         obj_priv->pages = NULL;
2263         obj_priv->pages_refcount--;
2264         return PTR_ERR(page);
2265 }
2266
2267 static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
2268 {
2269         struct drm_gem_object *obj = reg->obj;
2270         struct drm_device *dev = obj->dev;
2271         drm_i915_private_t *dev_priv = dev->dev_private;
2272         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2273         int regnum = obj_priv->fence_reg;
2274         uint64_t val;
2275
2276         val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2277                     0xfffff000) << 32;
2278         val |= obj_priv->gtt_offset & 0xfffff000;
2279         val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
2280                 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2281
2282         if (obj_priv->tiling_mode == I915_TILING_Y)
2283                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2284         val |= I965_FENCE_REG_VALID;
2285
2286         I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
2287 }
2288
2289 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2290 {
2291         struct drm_gem_object *obj = reg->obj;
2292         struct drm_device *dev = obj->dev;
2293         drm_i915_private_t *dev_priv = dev->dev_private;
2294         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2295         int regnum = obj_priv->fence_reg;
2296         uint64_t val;
2297
2298         val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2299                     0xfffff000) << 32;
2300         val |= obj_priv->gtt_offset & 0xfffff000;
2301         val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2302         if (obj_priv->tiling_mode == I915_TILING_Y)
2303                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2304         val |= I965_FENCE_REG_VALID;
2305
2306         I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2307 }
2308
2309 static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
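/*
 * Worked example (illustrative): in i915_write_fence_reg() below, an X-tiled
 * object with a 2048-byte stride and a 512-byte tile width gives
 *
 *        pitch_val = ffs(2048 / 512) - 1 = ffs(4) - 1 = 2
 *
 * i.e. the register encodes the pitch as log2 of the number of tiles per row.
 */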
2310 {
2311         struct drm_gem_object *obj = reg->obj;
2312         struct drm_device *dev = obj->dev;
2313         drm_i915_private_t *dev_priv = dev->dev_private;
2314         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2315         int regnum = obj_priv->fence_reg;
2316         int tile_width;
2317         uint32_t fence_reg, val;
2318         uint32_t pitch_val;
2319
2320         if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2321             (obj_priv->gtt_offset & (obj->size - 1))) {
2322                 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
2323                      __func__, obj_priv->gtt_offset, obj->size);
2324                 return;
2325         }
2326
2327         if (obj_priv->tiling_mode == I915_TILING_Y &&
2328             HAS_128_BYTE_Y_TILING(dev))
2329                 tile_width = 128;
2330         else
2331                 tile_width = 512;
2332
2333         /* Note: the pitch must be a power-of-two number of tile widths */
2334         pitch_val = obj_priv->stride / tile_width;
2335         pitch_val = ffs(pitch_val) - 1;
2336
2337         if (obj_priv->tiling_mode == I915_TILING_Y &&
2338             HAS_128_BYTE_Y_TILING(dev))
2339                 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2340         else
2341                 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2342
2343         val = obj_priv->gtt_offset;
2344         if (obj_priv->tiling_mode == I915_TILING_Y)
2345                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2346         val |= I915_FENCE_SIZE_BITS(obj->size);
2347         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2348         val |= I830_FENCE_REG_VALID;
2349
2350         if (regnum < 8)
2351                 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2352         else
2353                 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2354         I915_WRITE(fence_reg, val);
2355 }
2356
2357 static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2358 {
2359         struct drm_gem_object *obj = reg->obj;
2360         struct drm_device *dev = obj->dev;
2361         drm_i915_private_t *dev_priv = dev->dev_private;
2362         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2363         int regnum = obj_priv->fence_reg;
2364         uint32_t val;
2365         uint32_t pitch_val;
2366         uint32_t fence_size_bits;
2367
2368         if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
2369             (obj_priv->gtt_offset & (obj->size - 1))) {
2370                 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
2371                      __func__, obj_priv->gtt_offset);
2372                 return;
2373         }
2374
2375         pitch_val = obj_priv->stride / 128;
2376         pitch_val = ffs(pitch_val) - 1;
2377         WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2378
2379         val = obj_priv->gtt_offset;
2380         if (obj_priv->tiling_mode == I915_TILING_Y)
2381                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2382         fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2383         WARN_ON(fence_size_bits & ~0x00000f00);
2384         val |= fence_size_bits;
2385         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2386         val |= I830_FENCE_REG_VALID;
2387
2388         I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2389 }
2390
2391 static int i915_find_fence_reg(struct drm_device *dev)
2392 {
2393         struct drm_i915_fence_reg *reg = NULL;
2394         struct drm_i915_gem_object *obj_priv = NULL;
2395         struct drm_i915_private *dev_priv = dev->dev_private;
2396         struct drm_gem_object *obj = NULL;
2397         int i, avail, ret;
2398
2399         /* First try to find a free reg */
2400         avail = 0;
2401         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2402                 reg = &dev_priv->fence_regs[i];
2403                 if (!reg->obj)
2404                         return i;
2405
2406                 obj_priv = to_intel_bo(reg->obj);
2407                 if (!obj_priv->pin_count)
2408                     avail++;
2409         }
2410
2411         if (avail == 0)
2412                 return -ENOSPC;
2413
2414         /* None available, try to steal one or wait for a user to finish */
2415         i = I915_FENCE_REG_NONE;
2416         list_for_each_entry(reg, &dev_priv->mm.fence_list,
2417                             lru_list) {
2418                 obj = reg->obj;
2419                 obj_priv = to_intel_bo(obj);
2420
2421                 if (obj_priv->pin_count)
2422                         continue;
2423
2424                 /* found one! */
2425                 i = obj_priv->fence_reg;
2426                 break;
2427         }
2428
2429         BUG_ON(i == I915_FENCE_REG_NONE);
2430
2431         /* We only have a reference on obj from the active list. put_fence_reg
2432          * might drop that one, causing a use-after-free of obj. So hold a
2433          * private reference to obj like the other callers of put_fence_reg
2434          * (set_tiling ioctl) do. */
2435         drm_gem_object_reference(obj);
2436         ret = i915_gem_object_put_fence_reg(obj);
2437         drm_gem_object_unreference(obj);
2438         if (ret != 0)
2439                 return ret;
2440
2441         return i;
2442 }
2443
2444 /**
2445  * i915_gem_object_get_fence_reg - set up a fence reg for an object
2446  * @obj: object to map through a fence reg
2447  *
2448  * When mapping objects through the GTT, userspace wants to be able to write
2449  * to them without having to worry about swizzling if the object is tiled.
2450  *
2451  * This function walks the fence regs looking for a free one for @obj,
2452  * stealing one if it can't find any.
2453  *
2454  * It then sets up the reg based on the object's properties: address, pitch
2455  * and tiling format.
2456  */
2457 int
2458 i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2459 {
2460         struct drm_device *dev = obj->dev;
2461         struct drm_i915_private *dev_priv = dev->dev_private;
2462         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2463         struct drm_i915_fence_reg *reg = NULL;
2464         int ret;
2465
2466         /* Just update our place in the LRU if our fence is getting used. */
2467         if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
2468                 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2469                 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2470                 return 0;
2471         }
2472
2473         switch (obj_priv->tiling_mode) {
2474         case I915_TILING_NONE:
2475                 WARN(1, "allocating a fence for non-tiled object?\n");
2476                 break;
2477         case I915_TILING_X:
2478                 if (!obj_priv->stride)
2479                         return -EINVAL;
2480                 WARN((obj_priv->stride & (512 - 1)),
2481                      "object 0x%08x is X tiled but has non-512B pitch\n",
2482                      obj_priv->gtt_offset);
2483                 break;
2484         case I915_TILING_Y:
2485                 if (!obj_priv->stride)
2486                         return -EINVAL;
2487                 WARN((obj_priv->stride & (128 - 1)),
2488                      "object 0x%08x is Y tiled but has non-128B pitch\n",
2489                      obj_priv->gtt_offset);
2490                 break;
2491         }
2492
2493         ret = i915_find_fence_reg(dev);
2494         if (ret < 0)
2495                 return ret;
2496
2497         obj_priv->fence_reg = ret;
2498         reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2499         list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2500
2501         reg->obj = obj;
2502
2503         if (IS_GEN6(dev))
2504                 sandybridge_write_fence_reg(reg);
2505         else if (IS_I965G(dev))
2506                 i965_write_fence_reg(reg);
2507         else if (IS_I9XX(dev))
2508                 i915_write_fence_reg(reg);
2509         else
2510                 i830_write_fence_reg(reg);
2511
2512         trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
2513                         obj_priv->tiling_mode);
2514
2515         return 0;
2516 }
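/*
 * Rough usage sketch (illustrative): a caller needing CPU access to a tiled
 * object through the GTT typically does, with struct_mutex held:
 *
 *        if (obj_priv->tiling_mode != I915_TILING_NONE) {
 *                ret = i915_gem_object_get_fence_reg(obj);
 *                if (ret)
 *                        return ret;
 *        }
 *
 * and later relies on i915_gem_object_put_fence_reg() or
 * i915_gem_clear_fence_reg() to release the register.
 */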
2517
2518 /**
2519  * i915_gem_clear_fence_reg - clear out fence register info
2520  * @obj: object to clear
2521  *
2522  * Zeroes out the fence register itself and clears out the associated
2523  * data structures in dev_priv and obj_priv.
2524  */
2525 static void
2526 i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2527 {
2528         struct drm_device *dev = obj->dev;
2529         drm_i915_private_t *dev_priv = dev->dev_private;
2530         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2531         struct drm_i915_fence_reg *reg =
2532                 &dev_priv->fence_regs[obj_priv->fence_reg];
2533
2534         if (IS_GEN6(dev)) {
2535                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2536                              (obj_priv->fence_reg * 8), 0);
2537         } else if (IS_I965G(dev)) {
2538                 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2539         } else {
2540                 uint32_t fence_reg;
2541
2542                 if (obj_priv->fence_reg < 8)
2543                         fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2544                 else
2545                         fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
2546                                                        8) * 4;
2547
2548                 I915_WRITE(fence_reg, 0);
2549         }
2550
2551         reg->obj = NULL;
2552         obj_priv->fence_reg = I915_FENCE_REG_NONE;
2553         list_del_init(&reg->lru_list);
2554 }
2555
2556 /**
2557  * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2558  * to the buffer to finish, and then resets the fence register.
2559  * @obj: tiled object holding a fence register.
2560  *
2561  * Zeroes out the fence register itself and clears out the associated
2562  * data structures in dev_priv and obj_priv.
2563  */
2564 int
2565 i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2566 {
2567         struct drm_device *dev = obj->dev;
2568         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2569
2570         if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2571                 return 0;
2572
2573         /* If we've changed tiling, GTT-mappings of the object
2574          * need to re-fault to ensure that the correct fence register
2575          * setup is in place.
2576          */
2577         i915_gem_release_mmap(obj);
2578
2579         /* On the i915, GPU access to tiled buffers is via a fence,
2580          * therefore we must wait for any outstanding access to complete
2581          * before clearing the fence.
2582          */
2583         if (!IS_I965G(dev)) {
2584                 int ret;
2585
2586                 ret = i915_gem_object_flush_gpu_write_domain(obj);
2587                 if (ret != 0)
2588                         return ret;
2589
2590                 ret = i915_gem_object_wait_rendering(obj);
2591                 if (ret != 0)
2592                         return ret;
2593         }
2594
2595         i915_gem_object_flush_gtt_write_domain(obj);
2596         i915_gem_clear_fence_reg(obj);
2597
2598         return 0;
2599 }
2600
2601 /**
2602  * Finds free space in the GTT aperture and binds the object there.
2603  */
2604 static int
2605 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2606 {
2607         struct drm_device *dev = obj->dev;
2608         drm_i915_private_t *dev_priv = dev->dev_private;
2609         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2610         struct drm_mm_node *free_space;
2611         gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2612         int ret;
2613
2614         if (obj_priv->madv != I915_MADV_WILLNEED) {
2615                 DRM_ERROR("Attempting to bind a purgeable object\n");
2616                 return -EINVAL;
2617         }
2618
2619         if (alignment == 0)
2620                 alignment = i915_gem_get_gtt_alignment(obj);
2621         if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
2622                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2623                 return -EINVAL;
2624         }
2625
2626         /* If the object is bigger than the entire aperture, reject it early
2627          * before evicting everything in a vain attempt to find space.
2628          */
2629         if (obj->size > dev->gtt_total) {
2630                 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2631                 return -E2BIG;
2632         }
2633
2634  search_free:
2635         free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2636                                         obj->size, alignment, 0);
2637         if (free_space != NULL) {
2638                 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2639                                                        alignment);
2640                 if (obj_priv->gtt_space != NULL)
2641                         obj_priv->gtt_offset = obj_priv->gtt_space->start;
2642         }
2643         if (obj_priv->gtt_space == NULL) {
2644                 /* If the gtt is empty and we're still having trouble
2645                  * fitting our object in, we're out of memory.
2646                  */
2647 #if WATCH_LRU
2648                 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2649 #endif
2650                 ret = i915_gem_evict_something(dev, obj->size);
2651                 if (ret)
2652                         return ret;
2653
2654                 goto search_free;
2655         }
2656
2657 #if WATCH_BUF
2658         DRM_INFO("Binding object of size %zd at 0x%08x\n",
2659                  obj->size, obj_priv->gtt_offset);
2660 #endif
2661         ret = i915_gem_object_get_pages(obj, gfpmask);
2662         if (ret) {
2663                 drm_mm_put_block(obj_priv->gtt_space);
2664                 obj_priv->gtt_space = NULL;
2665
2666                 if (ret == -ENOMEM) {
2667                         /* first try to clear up some space from the GTT */
2668                         ret = i915_gem_evict_something(dev, obj->size);
2669                         if (ret) {
2670                                 /* now try to shrink everyone else */
2671                                 if (gfpmask) {
2672                                         gfpmask = 0;
2673                                         goto search_free;
2674                                 }
2675
2676                                 return ret;
2677                         }
2678
2679                         goto search_free;
2680                 }
2681
2682                 return ret;
2683         }
2684
2685         /* Create an AGP memory structure pointing at our pages, and bind it
2686          * into the GTT.
2687          */
2688         obj_priv->agp_mem = drm_agp_bind_pages(dev,
2689                                                obj_priv->pages,
2690                                                obj->size >> PAGE_SHIFT,
2691                                                obj_priv->gtt_offset,
2692                                                obj_priv->agp_type);
2693         if (obj_priv->agp_mem == NULL) {
2694                 i915_gem_object_put_pages(obj);
2695                 drm_mm_put_block(obj_priv->gtt_space);
2696                 obj_priv->gtt_space = NULL;
2697
2698                 ret = i915_gem_evict_something(dev, obj->size);
2699                 if (ret)
2700                         return ret;
2701
2702                 goto search_free;
2703         }
2704         atomic_inc(&dev->gtt_count);
2705         atomic_add(obj->size, &dev->gtt_memory);
2706
2707         /* Assert that the object is not currently in any GPU domain. As it
2708          * wasn't in the GTT, there shouldn't be any way it could have been in
2709          * a GPU cache
2710          * a GPU cache.
2711         BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2712         BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2713
2714         trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
2715
2716         return 0;
2717 }
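/*
 * Note (descriptive): on allocation pressure, i915_gem_object_bind_to_gtt()
 * above retries in order: evict something from the GTT, then drop the
 * __GFP_NORETRY | __GFP_NOWARN gfpmask so the allocator tries harder, and
 * only then returns the error to the caller.
 */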
2718
2719 void
2720 i915_gem_clflush_object(struct drm_gem_object *obj)
2721 {
2722         struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
2723
2724         /* If we don't have a page list set up, then we're not pinned
2725          * to GPU, and we can ignore the cache flush because it'll happen
2726          * again at bind time.
2727          */
2728         if (obj_priv->pages == NULL)
2729                 return;
2730
2731         trace_i915_gem_object_clflush(obj);
2732
2733         drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
2734 }
2735
2736 /** Flushes any GPU write domain for the object if it's dirty. */
2737 static int
2738 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2739 {
2740         struct drm_device *dev = obj->dev;
2741         uint32_t old_write_domain;
2742         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2743
2744         if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2745                 return 0;
2746
2747         /* Queue the GPU write cache flushing we need. */
2748         old_write_domain = obj->write_domain;
2749         i915_gem_flush(dev, 0, obj->write_domain);
2750         if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
2751                 return -ENOMEM;
2752
2753         trace_i915_gem_object_change_domain(obj,
2754                                             obj->read_domains,
2755                                             old_write_domain);
2756         return 0;
2757 }
2758
2759 /** Flushes the GTT write domain for the object if it's dirty. */
2760 static void
2761 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2762 {
2763         uint32_t old_write_domain;
2764
2765         if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2766                 return;
2767
2768         /* No actual flushing is required for the GTT write domain.   Writes
2769          * to it immediately go to main memory as far as we know, so there's
2770          * no chipset flush.  It also doesn't land in render cache.
2771          */
2772         old_write_domain = obj->write_domain;
2773         obj->write_domain = 0;
2774
2775         trace_i915_gem_object_change_domain(obj,
2776                                             obj->read_domains,
2777                                             old_write_domain);
2778 }
2779
2780 /** Flushes the CPU write domain for the object if it's dirty. */
2781 static void
2782 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2783 {
2784         struct drm_device *dev = obj->dev;
2785         uint32_t old_write_domain;
2786
2787         if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2788                 return;
2789
2790         i915_gem_clflush_object(obj);
2791         drm_agp_chipset_flush(dev);
2792         old_write_domain = obj->write_domain;
2793         obj->write_domain = 0;
2794
2795         trace_i915_gem_object_change_domain(obj,
2796                                             obj->read_domains,
2797                                             old_write_domain);
2798 }
2799
2800 int
2801 i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
2802 {
2803         int ret = 0;
2804
2805         switch (obj->write_domain) {
2806         case I915_GEM_DOMAIN_GTT:
2807                 i915_gem_object_flush_gtt_write_domain(obj);
2808                 break;
2809         case I915_GEM_DOMAIN_CPU:
2810                 i915_gem_object_flush_cpu_write_domain(obj);
2811                 break;
2812         default:
2813                 ret = i915_gem_object_flush_gpu_write_domain(obj);
2814                 break;
2815         }
2816
2817         return ret;
2818 }
2819
2820 /**
2821  * Moves a single object to the GTT read, and possibly write domain.
2822  *
2823  * This function returns when the move is complete, including waiting on
2824  * flushes to occur.
2825  */
2826 int
2827 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2828 {
2829         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2830         uint32_t old_write_domain, old_read_domains;
2831         int ret;
2832
2833         /* Not valid to be called on unbound objects. */
2834         if (obj_priv->gtt_space == NULL)
2835                 return -EINVAL;
2836
2837         ret = i915_gem_object_flush_gpu_write_domain(obj);
2838         if (ret != 0)
2839                 return ret;
2840
2841         /* Wait on any GPU rendering and flushing to occur. */
2842         ret = i915_gem_object_wait_rendering(obj);
2843         if (ret != 0)
2844                 return ret;
2845
2846         old_write_domain = obj->write_domain;
2847         old_read_domains = obj->read_domains;
2848
2849         /* If we're writing through the GTT domain, then CPU and GPU caches
2850          * will need to be invalidated at next use.
2851          */
2852         if (write)
2853                 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2854
2855         i915_gem_object_flush_cpu_write_domain(obj);
2856
2857         /* It should now be out of any other write domains, and we can update
2858          * the domain values for our changes.
2859          */
2860         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2861         obj->read_domains |= I915_GEM_DOMAIN_GTT;
2862         if (write) {
2863                 obj->write_domain = I915_GEM_DOMAIN_GTT;
2864                 obj_priv->dirty = 1;
2865         }
2866
2867         trace_i915_gem_object_change_domain(obj,
2868                                             old_read_domains,
2869                                             old_write_domain);
2870
2871         return 0;
2872 }
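/*
 * Rough usage sketch (illustrative): writes through a GTT mapping, e.g. from
 * pwrite or the fault handler, are typically preceded by
 *
 *        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 *        if (ret)
 *                return ret;
 *
 * so GPU rendering has completed and CPU caches are flushed before the pages
 * are written through the aperture.
 */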
2873
2874 /*
2875  * Prepare the buffer for use as a display plane. Any wait for a pending
2876  * flush is uninterruptible, since the modesetting path must not be interrupted.
2877  */
2878 int
2879 i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2880 {
2881         struct drm_device *dev = obj->dev;
2882         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2883         uint32_t old_write_domain, old_read_domains;
2884         int ret;
2885
2886         /* Not valid to be called on unbound objects. */
2887         if (obj_priv->gtt_space == NULL)
2888                 return -EINVAL;
2889
2890         ret = i915_gem_object_flush_gpu_write_domain(obj);
2891         if (ret)
2892                 return ret;
2893
2894         /* Wait on any GPU rendering and flushing to occur. */
2895         if (obj_priv->active) {
2896 #if WATCH_BUF
2897                 DRM_INFO("%s: object %p wait for seqno %08x\n",
2898                           __func__, obj, obj_priv->last_rendering_seqno);
2899 #endif
2900                 ret = i915_do_wait_request(dev,
2901                                 obj_priv->last_rendering_seqno,
2902                                 0,
2903                                 obj_priv->ring);
2904                 if (ret != 0)
2905                         return ret;
2906         }
2907
2908         i915_gem_object_flush_cpu_write_domain(obj);
2909
2910         old_write_domain = obj->write_domain;
2911         old_read_domains = obj->read_domains;
2912
2913         /* It should now be out of any other write domains, and we can update
2914          * the domain values for our changes.
2915          */
2916         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2917         obj->read_domains = I915_GEM_DOMAIN_GTT;
2918         obj->write_domain = I915_GEM_DOMAIN_GTT;
2919         obj_priv->dirty = 1;
2920
2921         trace_i915_gem_object_change_domain(obj,
2922                                             old_read_domains,
2923                                             old_write_domain);
2924
2925         return 0;
2926 }
2927
2928 /**
2929  * Moves a single object to the CPU read, and possibly write domain.
2930  *
2931  * This function returns when the move is complete, including waiting on
2932  * flushes to occur.
2933  */
2934 static int
2935 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2936 {
2937         uint32_t old_write_domain, old_read_domains;
2938         int ret;
2939
2940         ret = i915_gem_object_flush_gpu_write_domain(obj);
2941         if (ret)
2942                 return ret;
2943
2944         /* Wait on any GPU rendering and flushing to occur. */
2945         ret = i915_gem_object_wait_rendering(obj);
2946         if (ret != 0)
2947                 return ret;
2948
2949         i915_gem_object_flush_gtt_write_domain(obj);
2950
2951         /* If we have a partially-valid cache of the object in the CPU,
2952          * finish invalidating it and free the per-page flags.
2953          */
2954         i915_gem_object_set_to_full_cpu_read_domain(obj);
2955
2956         old_write_domain = obj->write_domain;
2957         old_read_domains = obj->read_domains;
2958
2959         /* Flush the CPU cache if it's still invalid. */
2960         if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2961                 i915_gem_clflush_object(obj);
2962
2963                 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2964         }
2965
2966         /* It should now be out of any other write domains, and we can update
2967          * the domain values for our changes.
2968          */
2969         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2970
2971         /* If we're writing through the CPU, then the GPU read domains will
2972          * need to be invalidated at next use.
2973          */
2974         if (write) {
2975                 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2976                 obj->write_domain = I915_GEM_DOMAIN_CPU;
2977         }
2978
2979         trace_i915_gem_object_change_domain(obj,
2980                                             old_read_domains,
2981                                             old_write_domain);
2982
2983         return 0;
2984 }
2985
2986 /*
2987  * Set the next domain for the specified object. This
2988  * may not actually perform the necessary flushing/invalidating though,
2989  * as that may want to be batched with other set_domain operations.
2990  *
2991  * This is (we hope) the only really tricky part of gem. The goal
2992  * is fairly simple -- track which caches hold bits of the object
2993  * and make sure they remain coherent. A few concrete examples may
2994  * help to explain how it works. For shorthand, we use the notation
2995  * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2996  * a pair of read and write domain masks.
2997  *
2998  * Case 1: the batch buffer
2999  *
3000  *      1. Allocated
3001  *      2. Written by CPU
3002  *      3. Mapped to GTT
3003  *      4. Read by GPU
3004  *      5. Unmapped from GTT
3005  *      6. Freed
3006  *
3007  *      Let's take these a step at a time
3008  *
3009  *      1. Allocated
3010  *              Pages allocated from the kernel may still have
3011  *              cache contents, so we set them to (CPU, CPU) always.
3012  *      2. Written by CPU (using pwrite)
3013  *              The pwrite function calls set_domain (CPU, CPU) and
3014  *              this function does nothing (as nothing changes)
3015  *      3. Mapped to GTT
3016  *              This function asserts that the object is not
3017  *              currently in any GPU-based read or write domains
3018  *      4. Read by GPU
3019  *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
3020  *              As write_domain is zero, this function adds in the
3021  *              current read domains (CPU+COMMAND, 0).
3022  *              flush_domains is set to CPU.
3023  *              invalidate_domains is set to COMMAND
3024  *              clflush is run to get data out of the CPU caches
3025  *              then i915_dev_set_domain calls i915_gem_flush to
3026  *              emit an MI_FLUSH and drm_agp_chipset_flush
3027  *      5. Unmapped from GTT
3028  *              i915_gem_object_unbind calls set_domain (CPU, CPU)
3029  *              flush_domains and invalidate_domains end up both zero
3030  *              so no flushing/invalidating happens
3031  *      6. Freed
3032  *              yay, done
3033  *
3034  * Case 2: The shared render buffer
3035  *
3036  *      1. Allocated
3037  *      2. Mapped to GTT
3038  *      3. Read/written by GPU
3039  *      4. set_domain to (CPU,CPU)
3040  *      5. Read/written by CPU
3041  *      6. Read/written by GPU
3042  *
3043  *      1. Allocated
3044  *              Same as last example, (CPU, CPU)
3045  *      2. Mapped to GTT
3046  *              Nothing changes (assertions find that it is not in the GPU)
3047  *      3. Read/written by GPU
3048  *              execbuffer calls set_domain (RENDER, RENDER)
3049  *              flush_domains gets CPU
3050  *              invalidate_domains gets GPU
3051  *              clflush (obj)
3052  *              MI_FLUSH and drm_agp_chipset_flush
3053  *      4. set_domain (CPU, CPU)
3054  *              flush_domains gets GPU
3055  *              invalidate_domains gets CPU
3056  *              wait_rendering (obj) to make sure all drawing is complete.
3057  *              This will include an MI_FLUSH to get the data from GPU
3058  *              to memory
3059  *              clflush (obj) to invalidate the CPU cache
3060  *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
3061  *      5. Read/written by CPU
3062  *              cache lines are loaded and dirtied
3063  *      6. Read/written by GPU
3064  *              Same as last GPU access
3065  *
3066  * Case 3: The constant buffer
3067  *
3068  *      1. Allocated
3069  *      2. Written by CPU
3070  *      3. Read by GPU
3071  *      4. Updated (written) by CPU again
3072  *      5. Read by GPU
3073  *
3074  *      1. Allocated
3075  *              (CPU, CPU)
3076  *      2. Written by CPU
3077  *              (CPU, CPU)
3078  *      3. Read by GPU
3079  *              (CPU+RENDER, 0)
3080  *              flush_domains = CPU
3081  *              invalidate_domains = RENDER
3082  *              clflush (obj)
3083  *              MI_FLUSH
3084  *              drm_agp_chipset_flush
3085  *      4. Updated (written) by CPU again
3086  *              (CPU, CPU)
3087  *              flush_domains = 0 (no previous write domain)
3088  *              invalidate_domains = 0 (no new read domains)
3089  *      5. Read by GPU
3090  *              (CPU+RENDER, 0)
3091  *              flush_domains = CPU
3092  *              invalidate_domains = RENDER
3093  *              clflush (obj)
3094  *              MI_FLUSH
3095  *              drm_agp_chipset_flush
3096  */
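/*
 * Hedged worked example of the bookkeeping below, using Case 2 step 3
 * above: an object currently in (CPU, CPU) is named in an execbuffer
 * with pending domains (RENDER, RENDER).  The masks then come out as
 *
 *      flush_domains      |= CPU;              old write domain
 *      invalidate_domains |= RENDER & ~CPU;    new read domains
 *
 * so the object is clflushed (CPU is in the flush set) and the render
 * cache is invalidated before the GPU touches the buffer.
 */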
3097 static void
3098 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
3099 {
3100         struct drm_device               *dev = obj->dev;
3101         struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
3102         uint32_t                        invalidate_domains = 0;
3103         uint32_t                        flush_domains = 0;
3104         uint32_t                        old_read_domains;
3105
3106         BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
3107         BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
3108
3109         intel_mark_busy(dev, obj);
3110
3111 #if WATCH_BUF
3112         DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
3113                  __func__, obj,
3114                  obj->read_domains, obj->pending_read_domains,
3115                  obj->write_domain, obj->pending_write_domain);
3116 #endif
3117         /*
3118          * If the object isn't moving to a new write domain,
3119          * let the object stay in multiple read domains
3120          */
3121         if (obj->pending_write_domain == 0)
3122                 obj->pending_read_domains |= obj->read_domains;
3123         else
3124                 obj_priv->dirty = 1;
3125
3126         /*
3127          * Flush the current write domain if
3128          * the new read domains don't match. Invalidate
3129          * any read domains which differ from the old
3130          * write domain
3131          */
3132         if (obj->write_domain &&
3133             obj->write_domain != obj->pending_read_domains) {
3134                 flush_domains |= obj->write_domain;
3135                 invalidate_domains |=
3136                         obj->pending_read_domains & ~obj->write_domain;
3137         }
3138         /*
3139          * Invalidate any read caches which may have
3140          * stale data. That is, any new read domains.
3141          */
3142         invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
3143         if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
3144 #if WATCH_BUF
3145                 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
3146                          __func__, flush_domains, invalidate_domains);
3147 #endif
3148                 i915_gem_clflush_object(obj);
3149         }
3150
3151         old_read_domains = obj->read_domains;
3152
3153         /* The actual obj->write_domain will be updated with
3154          * pending_write_domain after we emit the accumulated flush for all
3155          * of our domain changes in execbuffers (which clears objects'
3156          * write_domains).  So if we have a current write domain that we
3157          * aren't changing, set pending_write_domain to that.
3158          */
3159         if (flush_domains == 0 && obj->pending_write_domain == 0)
3160                 obj->pending_write_domain = obj->write_domain;
3161         obj->read_domains = obj->pending_read_domains;
3162
3163         dev->invalidate_domains |= invalidate_domains;
3164         dev->flush_domains |= flush_domains;
3165 #if WATCH_BUF
3166         DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
3167                  __func__,
3168                  obj->read_domains, obj->write_domain,
3169                  dev->invalidate_domains, dev->flush_domains);
3170 #endif
3171
3172         trace_i915_gem_object_change_domain(obj,
3173                                             old_read_domains,
3174                                             obj->write_domain);
3175 }
3176
3177 /**
3178  * Moves the object from a partially CPU read to a full one.
3179  *
3180  * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3181  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3182  */
3183 static void
3184 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3185 {
3186         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3187
3188         if (!obj_priv->page_cpu_valid)
3189                 return;
3190
3191         /* If we're partially in the CPU read domain, finish moving it in.
3192          */
3193         if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3194                 int i;
3195
3196                 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3197                         if (obj_priv->page_cpu_valid[i])
3198                                 continue;
3199                         drm_clflush_pages(obj_priv->pages + i, 1);
3200                 }
3201         }
3202
3203         /* Free the page_cpu_valid mappings which are now stale, whether
3204          * or not we've got I915_GEM_DOMAIN_CPU.
3205          */
3206         kfree(obj_priv->page_cpu_valid);
3207         obj_priv->page_cpu_valid = NULL;
3208 }
3209
3210 /**
3211  * Set the CPU read domain on a range of the object.
3212  *
3213  * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3214  * not entirely valid.  The page_cpu_valid member of the object flags which
3215  * pages have been flushed, and will be respected by
3216  * i915_gem_object_set_to_cpu_domain() if it's later called to get a valid mapping
3217  * of the whole object.
3218  *
3219  * This function returns when the move is complete, including waiting on
3220  * flushes to occur.
3221  */
3222 static int
3223 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3224                                           uint64_t offset, uint64_t size)
3225 {
3226         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3227         uint32_t old_read_domains;
3228         int i, ret;
3229
3230         if (offset == 0 && size == obj->size)
3231                 return i915_gem_object_set_to_cpu_domain(obj, 0);
3232
3233         ret = i915_gem_object_flush_gpu_write_domain(obj);
3234         if (ret)
3235                 return ret;
3236
3237         /* Wait on any GPU rendering and flushing to occur. */
3238         ret = i915_gem_object_wait_rendering(obj);
3239         if (ret != 0)
3240                 return ret;
3241         i915_gem_object_flush_gtt_write_domain(obj);
3242
3243         /* If we're already fully in the CPU read domain, we're done. */
3244         if (obj_priv->page_cpu_valid == NULL &&
3245             (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
3246                 return 0;
3247
3248         /* Otherwise, create/clear the per-page CPU read domain flag if we're
3249          * newly adding I915_GEM_DOMAIN_CPU
3250          */
3251         if (obj_priv->page_cpu_valid == NULL) {
3252                 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3253                                                    GFP_KERNEL);
3254                 if (obj_priv->page_cpu_valid == NULL)
3255                         return -ENOMEM;
3256         } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3257                 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
3258
3259         /* Flush the cache on any pages that are still invalid from the CPU's
3260          * perspective.
3261          */
3262         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3263              i++) {
3264                 if (obj_priv->page_cpu_valid[i])
3265                         continue;
3266
3267                 drm_clflush_pages(obj_priv->pages + i, 1);
3268
3269                 obj_priv->page_cpu_valid[i] = 1;
3270         }
3271
3272         /* It should now be out of any other write domains, and we can update
3273          * the domain values for our changes.
3274          */
3275         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3276
3277         old_read_domains = obj->read_domains;
3278         obj->read_domains |= I915_GEM_DOMAIN_CPU;
3279
3280         trace_i915_gem_object_change_domain(obj,
3281                                             old_read_domains,
3282                                             obj->write_domain);
3283
3284         return 0;
3285 }
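
/*
 * Hedged example of the per-page tracking above: for a 16KiB object and
 * a range covering bytes 4096..8191, only page index 1 is clflushed and
 * marked in page_cpu_valid[]; the remaining pages are flushed later by
 * i915_gem_object_set_to_full_cpu_read_domain() if the whole object is
 * moved into the CPU domain.
 */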
3286
3287 /**
3288  * Pin an object to the GTT and evaluate the relocations landing in it.
3289  */
3290 static int
3291 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3292                                  struct drm_file *file_priv,
3293                                  struct drm_i915_gem_exec_object2 *entry,
3294                                  struct drm_i915_gem_relocation_entry *relocs)
3295 {
3296         struct drm_device *dev = obj->dev;
3297         drm_i915_private_t *dev_priv = dev->dev_private;
3298         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3299         int i, ret;
3300         void __iomem *reloc_page;
3301         bool need_fence;
3302
3303         need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3304                      obj_priv->tiling_mode != I915_TILING_NONE;
3305
3306         /* Check fence reg constraints and rebind if necessary */
3307         if (need_fence &&
3308             !i915_gem_object_fence_offset_ok(obj,
3309                                              obj_priv->tiling_mode)) {
3310                 ret = i915_gem_object_unbind(obj);
3311                 if (ret)
3312                         return ret;
3313         }
3314
3315         /* Choose the GTT offset for our buffer and put it there. */
3316         ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3317         if (ret)
3318                 return ret;
3319
3320         /*
3321          * Pre-965 chips need a fence register set up in order to
3322          * properly handle blits to/from tiled surfaces.
3323          */
3324         if (need_fence) {
3325                 ret = i915_gem_object_get_fence_reg(obj);
3326                 if (ret != 0) {
3327                         i915_gem_object_unpin(obj);
3328                         return ret;
3329                 }
3330         }
3331
3332         entry->offset = obj_priv->gtt_offset;
3333
3334         /* Apply the relocations, using the GTT aperture to avoid cache
3335          * flushing requirements.
3336          */
3337         for (i = 0; i < entry->relocation_count; i++) {
3338                 struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
3339                 struct drm_gem_object *target_obj;
3340                 struct drm_i915_gem_object *target_obj_priv;
3341                 uint32_t reloc_val, reloc_offset;
3342                 uint32_t __iomem *reloc_entry;
3343
3344                 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
3345                                                    reloc->target_handle);
3346                 if (target_obj == NULL) {
3347                         i915_gem_object_unpin(obj);
3348                         return -EBADF;
3349                 }
3350                 target_obj_priv = to_intel_bo(target_obj);
3351
3352 #if WATCH_RELOC
3353                 DRM_INFO("%s: obj %p offset %08x target %d "
3354                          "read %08x write %08x gtt %08x "
3355                          "presumed %08x delta %08x\n",
3356                          __func__,
3357                          obj,
3358                          (int) reloc->offset,
3359                          (int) reloc->target_handle,
3360                          (int) reloc->read_domains,
3361                          (int) reloc->write_domain,
3362                          (int) target_obj_priv->gtt_offset,
3363                          (int) reloc->presumed_offset,
3364                          reloc->delta);
3365 #endif
3366
3367                 /* The target buffer should have appeared before us in the
3368                  * exec_object list, so it should have a GTT space bound by now.
3369                  */
3370                 if (target_obj_priv->gtt_space == NULL) {
3371                         DRM_ERROR("No GTT space found for object %d\n",
3372                                   reloc->target_handle);
3373                         drm_gem_object_unreference(target_obj);
3374                         i915_gem_object_unpin(obj);
3375                         return -EINVAL;
3376                 }
3377
3378                 /* Validate that the target is in a valid r/w GPU domain */
3379                 if (reloc->write_domain & (reloc->write_domain - 1)) {
3380                         DRM_ERROR("reloc with multiple write domains: "
3381                                   "obj %p target %d offset %d "
3382                                   "read %08x write %08x",
3383                                   obj, reloc->target_handle,
3384                                   (int) reloc->offset,
3385                                   reloc->read_domains,
3386                                   reloc->write_domain);
                             drm_gem_object_unreference(target_obj);
                             i915_gem_object_unpin(obj);
3387                         return -EINVAL;
3388                 }
3389                 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3390                     reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3391                         DRM_ERROR("reloc with read/write CPU domains: "
3392                                   "obj %p target %d offset %d "
3393                                   "read %08x write %08x",
3394                                   obj, reloc->target_handle,
3395                                   (int) reloc->offset,
3396                                   reloc->read_domains,
3397                                   reloc->write_domain);
3398                         drm_gem_object_unreference(target_obj);
3399                         i915_gem_object_unpin(obj);
3400                         return -EINVAL;
3401                 }
3402                 if (reloc->write_domain && target_obj->pending_write_domain &&
3403                     reloc->write_domain != target_obj->pending_write_domain) {
3404                         DRM_ERROR("Write domain conflict: "
3405                                   "obj %p target %d offset %d "
3406                                   "new %08x old %08x\n",
3407                                   obj, reloc->target_handle,
3408                                   (int) reloc->offset,
3409                                   reloc->write_domain,
3410                                   target_obj->pending_write_domain);
3411                         drm_gem_object_unreference(target_obj);
3412                         i915_gem_object_unpin(obj);
3413                         return -EINVAL;
3414                 }
3415
3416                 target_obj->pending_read_domains |= reloc->read_domains;
3417                 target_obj->pending_write_domain |= reloc->write_domain;
3418
3419                 /* If the relocation already has the right value in it, no
3420                  * more work needs to be done.
3421                  */
3422                 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
3423                         drm_gem_object_unreference(target_obj);
3424                         continue;
3425                 }
3426
3427                 /* Check that the relocation address is valid... */
3428                 if (reloc->offset > obj->size - 4) {
3429                         DRM_ERROR("Relocation beyond object bounds: "
3430                                   "obj %p target %d offset %d size %d.\n",
3431                                   obj, reloc->target_handle,
3432                                   (int) reloc->offset, (int) obj->size);
3433                         drm_gem_object_unreference(target_obj);
3434                         i915_gem_object_unpin(obj);
3435                         return -EINVAL;
3436                 }
3437                 if (reloc->offset & 3) {
3438                         DRM_ERROR("Relocation not 4-byte aligned: "
3439                                   "obj %p target %d offset %d.\n",
3440                                   obj, reloc->target_handle,
3441                                   (int) reloc->offset);
3442                         drm_gem_object_unreference(target_obj);
3443                         i915_gem_object_unpin(obj);
3444                         return -EINVAL;
3445                 }
3446
3447                 /* and points to somewhere within the target object. */
3448                 if (reloc->delta >= target_obj->size) {
3449                         DRM_ERROR("Relocation beyond target object bounds: "
3450                                   "obj %p target %d delta %d size %d.\n",
3451                                   obj, reloc->target_handle,
3452                                   (int) reloc->delta, (int) target_obj->size);
3453                         drm_gem_object_unreference(target_obj);
3454                         i915_gem_object_unpin(obj);
3455                         return -EINVAL;
3456                 }
3457
3458                 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3459                 if (ret != 0) {
3460                         drm_gem_object_unreference(target_obj);
3461                         i915_gem_object_unpin(obj);
3462                         return ret;
3463                 }
3464
3465                 /* Map the page containing the relocation we're going to
3466                  * perform.
3467                  */
3468                 reloc_offset = obj_priv->gtt_offset + reloc->offset;
3469                 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3470                                                       (reloc_offset &
3471                                                        ~(PAGE_SIZE - 1)));
3472                 reloc_entry = (uint32_t __iomem *)(reloc_page +
3473                                                    (reloc_offset & (PAGE_SIZE - 1)));
3474                 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
3475
3476 #if WATCH_BUF
3477                 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
3478                           obj, (unsigned int) reloc->offset,
3479                           readl(reloc_entry), reloc_val);
3480 #endif
3481                 writel(reloc_val, reloc_entry);
3482                 io_mapping_unmap_atomic(reloc_page);
3483
3484                 /* The updated presumed offset for this entry will be
3485                  * copied back out to the user.
3486                  */
3487                 reloc->presumed_offset = target_obj_priv->gtt_offset;
3488
3489                 drm_gem_object_unreference(target_obj);
3490         }
3491
3492 #if WATCH_BUF
3493         if (0)
3494                 i915_gem_dump_object(obj, 128, __func__, ~0);
3495 #endif
3496         return 0;
3497 }
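
/*
 * Hedged illustration of the relocation write above: if the target
 * object ended up at GTT offset 0x00400000 and reloc->delta is 0x100,
 * then reloc_val == 0x00400100 is written at obj->gtt_offset +
 * reloc->offset through the write-combining aperture mapping, and
 * reloc->presumed_offset is updated so that a later execbuffer with an
 * unchanged presumed offset can skip the rewrite entirely.
 */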
3498
3499 /* Throttle our rendering by waiting until the ring has completed our requests
3500  * emitted over 20 msec ago.
3501  *
3502  * Note that if we were to use the current jiffies each time around the loop,
3503  * we wouldn't escape the function with any frames outstanding if the time to
3504  * render a frame was over 20ms.
3505  *
3506  * This should get us reasonable parallelism between CPU and GPU but also
3507  * relatively low latency when blocking on a particular request to finish.
3508  */
3509 static int
3510 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3511 {
3512         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3513         int ret = 0;
3514         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3515
3516         mutex_lock(&dev->struct_mutex);
3517         while (!list_empty(&i915_file_priv->mm.request_list)) {
3518                 struct drm_i915_gem_request *request;
3519
3520                 request = list_first_entry(&i915_file_priv->mm.request_list,
3521                                            struct drm_i915_gem_request,
3522                                            client_list);
3523
3524                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3525                         break;
3526
3527                 ret = i915_wait_request(dev, request->seqno, request->ring);
3528                 if (ret != 0)
3529                         break;
3530         }
3531         mutex_unlock(&dev->struct_mutex);
3532
3533         return ret;
3534 }
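
/*
 * Hedged numeric example of the throttle above: with HZ == 1000,
 * recent_enough is jiffies - 20.  Requests emitted within the last
 * 20ms are left outstanding, anything older is waited on, so a client
 * keeps at most roughly 20ms of rendering queued ahead of the GPU.
 */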
3535
3536 static int
3537 i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
3538                               uint32_t buffer_count,
3539                               struct drm_i915_gem_relocation_entry **relocs)
3540 {
3541         uint32_t reloc_count = 0, reloc_index = 0, i;
3542         int ret;
3543
3544         *relocs = NULL;
3545         for (i = 0; i < buffer_count; i++) {
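                     /* Guard against overflow when summing the per-buffer relocation counts. */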
3546                 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3547                         return -EINVAL;
3548                 reloc_count += exec_list[i].relocation_count;
3549         }
3550
3551         *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
3552         if (*relocs == NULL) {
3553                 DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
3554                 return -ENOMEM;
3555         }
3556
3557         for (i = 0; i < buffer_count; i++) {
3558                 struct drm_i915_gem_relocation_entry __user *user_relocs;
3559
3560                 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3561
3562                 ret = copy_from_user(&(*relocs)[reloc_index],
3563                                      user_relocs,
3564                                      exec_list[i].relocation_count *
3565                                      sizeof(**relocs));
3566                 if (ret != 0) {
3567                         drm_free_large(*relocs);
3568                         *relocs = NULL;
3569                         return -EFAULT;
3570                 }
3571
3572                 reloc_index += exec_list[i].relocation_count;
3573         }
3574
3575         return 0;
3576 }
3577
3578 static int
3579 i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
3580                             uint32_t buffer_count,
3581                             struct drm_i915_gem_relocation_entry *relocs)
3582 {
3583         uint32_t reloc_count = 0, i;
3584         int ret = 0;
3585
3586         if (relocs == NULL)
3587                 return 0;
3588
3589         for (i = 0; i < buffer_count; i++) {
3590                 struct drm_i915_gem_relocation_entry __user *user_relocs;
3591                 int unwritten;
3592
3593                 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3594
3595                 unwritten = copy_to_user(user_relocs,
3596                                          &relocs[reloc_count],
3597                                          exec_list[i].relocation_count *
3598                                          sizeof(*relocs));
3599
3600                 if (unwritten) {
3601                         ret = -EFAULT;
3602                         goto err;
3603                 }
3604
3605                 reloc_count += exec_list[i].relocation_count;
3606         }
3607
3608 err:
3609         drm_free_large(relocs);
3610
3611         return ret;
3612 }
3613
3614 static int
3615 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
3616                           uint64_t exec_offset)
3617 {
3618         uint32_t exec_start, exec_len;
3619
3620         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3621         exec_len = (uint32_t) exec->batch_len;
3622
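             /* Both the batch start and its length must be 8-byte aligned. */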
3623         if ((exec_start | exec_len) & 0x7)
3624                 return -EINVAL;
3625
3626         if (!exec_start)
3627                 return -EINVAL;
3628
3629         return 0;
3630 }
3631
3632 static int
3633 i915_gem_wait_for_pending_flip(struct drm_device *dev,
3634                                struct drm_gem_object **object_list,
3635                                int count)
3636 {
3637         drm_i915_private_t *dev_priv = dev->dev_private;
3638         struct drm_i915_gem_object *obj_priv;
3639         DEFINE_WAIT(wait);
3640         int i, ret = 0;
3641
3642         for (;;) {
3643                 prepare_to_wait(&dev_priv->pending_flip_queue,
3644                                 &wait, TASK_INTERRUPTIBLE);
3645                 for (i = 0; i < count; i++) {
3646                         obj_priv = to_intel_bo(object_list[i]);
3647                         if (atomic_read(&obj_priv->pending_flip) > 0)
3648                                 break;
3649                 }
3650                 if (i == count)
3651                         break;
3652
3653                 if (!signal_pending(current)) {
3654                         mutex_unlock(&dev->struct_mutex);
3655                         schedule();
3656                         mutex_lock(&dev->struct_mutex);
3657                         continue;
3658                 }
3659                 ret = -ERESTARTSYS;
3660                 break;
3661         }
3662         finish_wait(&dev_priv->pending_flip_queue, &wait);
3663
3664         return ret;
3665 }
3666
3667
3668 int
3669 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3670                        struct drm_file *file_priv,
3671                        struct drm_i915_gem_execbuffer2 *args,
3672                        struct drm_i915_gem_exec_object2 *exec_list)
3673 {
3674         drm_i915_private_t *dev_priv = dev->dev_private;
3675         struct drm_gem_object **object_list = NULL;
3676         struct drm_gem_object *batch_obj;
3677         struct drm_i915_gem_object *obj_priv;
3678         struct drm_clip_rect *cliprects = NULL;
3679         struct drm_i915_gem_relocation_entry *relocs = NULL;
3680         int ret = 0, ret2, i, pinned = 0;
3681         uint64_t exec_offset;
3682         uint32_t seqno, flush_domains, reloc_index;
3683         int pin_tries, flips;
3684
3685         struct intel_ring_buffer *ring = NULL;
3686
3687 #if WATCH_EXEC
3688         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3689                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3690 #endif
3691         if (args->flags & I915_EXEC_BSD) {
3692                 if (!HAS_BSD(dev)) {
3693                         DRM_ERROR("execbuf with wrong flag\n");
3694                         return -EINVAL;
3695                 }
3696                 ring = &dev_priv->bsd_ring;
3697         } else {
3698                 ring = &dev_priv->render_ring;
3699         }
3700
3701
3702         if (args->buffer_count < 1) {
3703                 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3704                 return -EINVAL;
3705         }
3706         object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
3707         if (object_list == NULL) {
3708                 DRM_ERROR("Failed to allocate object list for %d buffers\n",
3709                           args->buffer_count);
3710                 ret = -ENOMEM;
3711                 goto pre_mutex_err;
3712         }
3713
3714         if (args->num_cliprects != 0) {
3715                 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3716                                     GFP_KERNEL);
3717                 if (cliprects == NULL) {
3718                         ret = -ENOMEM;
3719                         goto pre_mutex_err;
3720                 }
3721
3722                 ret = copy_from_user(cliprects,
3723                                      (struct drm_clip_rect __user *)
3724                                      (uintptr_t) args->cliprects_ptr,
3725                                      sizeof(*cliprects) * args->num_cliprects);
3726                 if (ret != 0) {
3727                         DRM_ERROR("copy %d cliprects failed: %d\n",
3728                                   args->num_cliprects, ret);
3729                         goto pre_mutex_err;
3730                 }
3731         }
3732
3733         ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3734                                             &relocs);
3735         if (ret != 0)
3736                 goto pre_mutex_err;
3737
3738         mutex_lock(&dev->struct_mutex);
3739
3740         i915_verify_inactive(dev, __FILE__, __LINE__);
3741
3742         if (atomic_read(&dev_priv->mm.wedged)) {
3743                 mutex_unlock(&dev->struct_mutex);
3744                 ret = -EIO;
3745                 goto pre_mutex_err;
3746         }
3747
3748         if (dev_priv->mm.suspended) {
3749                 mutex_unlock(&dev->struct_mutex);
3750                 ret = -EBUSY;
3751                 goto pre_mutex_err;
3752         }
3753
3754         /* Look up object handles */
3755         flips = 0;
3756         for (i = 0; i < args->buffer_count; i++) {
3757                 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3758                                                        exec_list[i].handle);
3759                 if (object_list[i] == NULL) {
3760                         DRM_ERROR("Invalid object handle %d at index %d\n",
3761                                    exec_list[i].handle, i);
3762                         /* prevent error path from reading uninitialized data */
3763                         args->buffer_count = i + 1;
3764                         ret = -EBADF;
3765                         goto err;
3766                 }
3767
3768                 obj_priv = to_intel_bo(object_list[i]);
3769                 if (obj_priv->in_execbuffer) {
3770                         DRM_ERROR("Object %p appears more than once in object list\n",
3771                                    object_list[i]);
3772                         /* prevent error path from reading uninitialized data */
3773                         args->buffer_count = i + 1;
3774                         ret = -EBADF;
3775                         goto err;
3776                 }
3777                 obj_priv->in_execbuffer = true;
3778                 flips += atomic_read(&obj_priv->pending_flip);
3779         }
3780
3781         if (flips > 0) {
3782                 ret = i915_gem_wait_for_pending_flip(dev, object_list,
3783                                                      args->buffer_count);
3784                 if (ret)
3785                         goto err;
3786         }
3787
3788         /* Pin and relocate.  If the GTT fills up (-ENOSPC) on the first
              * pass, everything pinned so far is unpinned, the aperture is
              * evicted wholesale and the pass is retried once.
              */
3789         for (pin_tries = 0; ; pin_tries++) {
3790                 ret = 0;
3791                 reloc_index = 0;
3792
3793                 for (i = 0; i < args->buffer_count; i++) {
3794                         object_list[i]->pending_read_domains = 0;
3795                         object_list[i]->pending_write_domain = 0;
3796                         ret = i915_gem_object_pin_and_relocate(object_list[i],
3797                                                                file_priv,
3798                                                                &exec_list[i],
3799                                                                &relocs[reloc_index]);
3800                         if (ret)
3801                                 break;
3802                         pinned = i + 1;
3803                         reloc_index += exec_list[i].relocation_count;
3804                 }
3805                 /* success */
3806                 if (ret == 0)
3807                         break;
3808
3809                 /* error other than GTT full, or we've already tried again */
3810                 if (ret != -ENOSPC || pin_tries >= 1) {
3811                         if (ret != -ERESTARTSYS) {
3812                                 unsigned long long total_size = 0;
3813                                 int num_fences = 0;
3814                                 for (i = 0; i < args->buffer_count; i++) {
3815                                         obj_priv = to_intel_bo(object_list[i]);
3816
3817                                         total_size += object_list[i]->size;
3818                                         num_fences +=
3819                                                 exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
3820                                                 obj_priv->tiling_mode != I915_TILING_NONE;
3821                                 }
3822                                 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
3823                                           pinned+1, args->buffer_count,
3824                                           total_size, num_fences,
3825                                           ret);
3826                                 DRM_ERROR("%d objects [%d pinned], "
3827                                           "%d object bytes [%d pinned], "
3828                                           "%d/%d gtt bytes\n",
3829                                           atomic_read(&dev->object_count),
3830                                           atomic_read(&dev->pin_count),
3831                                           atomic_read(&dev->object_memory),
3832                                           atomic_read(&dev->pin_memory),
3833                                           atomic_read(&dev->gtt_memory),
3834                                           dev->gtt_total);
3835                         }
3836                         goto err;
3837                 }
3838
3839                 /* unpin all of our buffers */
3840                 for (i = 0; i < pinned; i++)
3841                         i915_gem_object_unpin(object_list[i]);
3842                 pinned = 0;
3843
3844                 /* evict everyone we can from the aperture */
3845                 ret = i915_gem_evict_everything(dev);
3846                 if (ret && ret != -ENOSPC)
3847                         goto err;
3848         }
3849
3850         /* Set the pending read domains for the batch buffer to COMMAND */
3851         batch_obj = object_list[args->buffer_count-1];
3852         if (batch_obj->pending_write_domain) {
3853                 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3854                 ret = -EINVAL;
3855                 goto err;
3856         }
3857         batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
3858
3859         /* Sanity check the batch buffer, prior to moving objects */
3860         exec_offset = exec_list[args->buffer_count - 1].offset;
3861         ret = i915_gem_check_execbuffer(args, exec_offset);
3862         if (ret != 0) {
3863                 DRM_ERROR("execbuf with invalid offset/length\n");
3864                 goto err;
3865         }
3866
3867         i915_verify_inactive(dev, __FILE__, __LINE__);
3868
3869         /* Zero the global flush/invalidate flags. These
3870          * will be modified as new domains are computed
3871          * for each object
3872          */
3873         dev->invalidate_domains = 0;
3874         dev->flush_domains = 0;
3875
3876         for (i = 0; i < args->buffer_count; i++) {
3877                 struct drm_gem_object *obj = object_list[i];
3878
3879                 /* Compute new gpu domains and update invalidate/flush */
3880                 i915_gem_object_set_to_gpu_domain(obj);
3881         }
3882
3883         i915_verify_inactive(dev, __FILE__, __LINE__);
3884
3885         if (dev->invalidate_domains | dev->flush_domains) {
3886 #if WATCH_EXEC
3887                 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3888                           __func__,
3889                          dev->invalidate_domains,
3890                          dev->flush_domains);
3891 #endif
3892                 i915_gem_flush(dev,
3893                                dev->invalidate_domains,
3894                                dev->flush_domains);
3895                 if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
3896                         (void)i915_add_request(dev, file_priv,
3897                                         dev->flush_domains,
3898                                         &dev_priv->render_ring);
3899
3900                         if (HAS_BSD(dev))
3901                                 (void)i915_add_request(dev, file_priv,
3902                                                 dev->flush_domains,
3903                                                 &dev_priv->bsd_ring);
3904                 }
3905         }
3906
3907         for (i = 0; i < args->buffer_count; i++) {
3908                 struct drm_gem_object *obj = object_list[i];
3909                 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3910                 uint32_t old_write_domain = obj->write_domain;
3911
3912                 obj->write_domain = obj->pending_write_domain;
3913                 if (obj->write_domain)
3914                         list_move_tail(&obj_priv->gpu_write_list,
3915                                        &dev_priv->mm.gpu_write_list);
3916                 else
3917                         list_del_init(&obj_priv->gpu_write_list);
3918
3919                 trace_i915_gem_object_change_domain(obj,
3920                                                     obj->read_domains,
3921                                                     old_write_domain);
3922         }
3923
3924         i915_verify_inactive(dev, __FILE__, __LINE__);
3925
3926 #if WATCH_COHERENCY
3927         for (i = 0; i < args->buffer_count; i++) {
3928                 i915_gem_object_check_coherency(object_list[i],
3929                                                 exec_list[i].handle);
3930         }
3931 #endif
3932
3933 #if WATCH_EXEC
3934         i915_gem_dump_object(batch_obj,
3935                               args->batch_len,
3936                               __func__,
3937                               ~0);
3938 #endif
3939
3940         /* Exec the batchbuffer */
3941         ret = ring->dispatch_gem_execbuffer(dev, ring, args,
3942                         cliprects, exec_offset);
3943         if (ret) {
3944                 DRM_ERROR("dispatch failed %d\n", ret);
3945                 goto err;
3946         }
3947
3948         /*
3949          * Ensure that the commands in the batch buffer are
3950          * finished before the interrupt fires
3951          */
3952         flush_domains = i915_retire_commands(dev, ring);
3953
3954         i915_verify_inactive(dev, __FILE__, __LINE__);
3955
3956         /*
3957          * Get a seqno representing the execution of the current buffer,
3958          * which we can wait on.  We would like to mitigate these interrupts,
3959          * likely by only creating seqnos occasionally (so that we have
3960          * *some* interrupts representing completion of buffers that we can
3961          * wait on when trying to clear up gtt space).
3962          */
3963         seqno = i915_add_request(dev, file_priv, flush_domains, ring);
3964         BUG_ON(seqno == 0);
3965         for (i = 0; i < args->buffer_count; i++) {
3966                 struct drm_gem_object *obj = object_list[i];
3967                 obj_priv = to_intel_bo(obj);
3968
3969                 i915_gem_object_move_to_active(obj, seqno, ring);
3970 #if WATCH_LRU
3971                 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3972 #endif
3973         }
3974 #if WATCH_LRU
3975         i915_dump_lru(dev, __func__);
3976 #endif
3977
3978         i915_verify_inactive(dev, __FILE__, __LINE__);
3979
3980 err:
3981         for (i = 0; i < pinned; i++)
3982                 i915_gem_object_unpin(object_list[i]);
3983
3984         for (i = 0; i < args->buffer_count; i++) {
3985                 if (object_list[i]) {
3986                         obj_priv = to_intel_bo(object_list[i]);
3987                         obj_priv->in_execbuffer = false;
3988                 }
3989                 drm_gem_object_unreference(object_list[i]);
3990         }
3991
3992         mutex_unlock(&dev->struct_mutex);
3993
3994 pre_mutex_err:
3995         /* Copy the updated relocations out regardless of current error
3996          * state.  Failure to update the relocs would mean that the next
3997          * time userland calls execbuf, it would do so with presumed offset
3998          * state that didn't match the actual object state.
3999          */
4000         ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
4001                                            relocs);
4002         if (ret2 != 0) {
4003                 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
4004
4005                 if (ret == 0)
4006                         ret = ret2;
4007         }
4008
4009         drm_free_large(object_list);
4010         kfree(cliprects);
4011
4012         return ret;
4013 }
4014
4015 /*
4016  * Legacy execbuffer just creates an exec2 list from the original exec object
4017  * list array and passes it to the real function.
4018  */
4019 int
4020 i915_gem_execbuffer(struct drm_device *dev, void *data,
4021                     struct drm_file *file_priv)
4022 {
4023         struct drm_i915_gem_execbuffer *args = data;
4024         struct drm_i915_gem_execbuffer2 exec2;
4025         struct drm_i915_gem_exec_object *exec_list = NULL;
4026         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4027         int ret, i;
4028
4029 #if WATCH_EXEC
4030         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4031                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4032 #endif
4033
4034         if (args->buffer_count < 1) {
4035                 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
4036                 return -EINVAL;
4037         }
4038
4039         /* Copy in the exec list from userland */
4040         exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
4041         exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4042         if (exec_list == NULL || exec2_list == NULL) {
4043                 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4044                           args->buffer_count);
4045                 drm_free_large(exec_list);
4046                 drm_free_large(exec2_list);
4047                 return -ENOMEM;
4048         }
4049         ret = copy_from_user(exec_list,
4050                              (struct drm_i915_relocation_entry __user *)
4051                              (uintptr_t) args->buffers_ptr,
4052                              sizeof(*exec_list) * args->buffer_count);
4053         if (ret != 0) {
4054                 DRM_ERROR("copy %d exec entries failed %d\n",
4055                           args->buffer_count, ret);
4056                 drm_free_large(exec_list);
4057                 drm_free_large(exec2_list);
4058                 return -EFAULT;
4059         }
4060
4061         for (i = 0; i < args->buffer_count; i++) {
4062                 exec2_list[i].handle = exec_list[i].handle;
4063                 exec2_list[i].relocation_count = exec_list[i].relocation_count;
4064                 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
4065                 exec2_list[i].alignment = exec_list[i].alignment;
4066                 exec2_list[i].offset = exec_list[i].offset;
4067                 if (!IS_I965G(dev))
4068                         exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
4069                 else
4070                         exec2_list[i].flags = 0;
4071         }
4072
4073         exec2.buffers_ptr = args->buffers_ptr;
4074         exec2.buffer_count = args->buffer_count;
4075         exec2.batch_start_offset = args->batch_start_offset;
4076         exec2.batch_len = args->batch_len;
4077         exec2.DR1 = args->DR1;
4078         exec2.DR4 = args->DR4;
4079         exec2.num_cliprects = args->num_cliprects;
4080         exec2.cliprects_ptr = args->cliprects_ptr;
4081         exec2.flags = I915_EXEC_RENDER;
4082
4083         ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
4084         if (!ret) {
4085                 /* Copy the new buffer offsets back to the user's exec list. */
4086                 for (i = 0; i < args->buffer_count; i++)
4087                         exec_list[i].offset = exec2_list[i].offset;
4088                 /* ... and back out to userspace */
4089                 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4090                                    (uintptr_t) args->buffers_ptr,
4091                                    exec_list,
4092                                    sizeof(*exec_list) * args->buffer_count);
4093                 if (ret) {
4094                         ret = -EFAULT;
4095                         DRM_ERROR("failed to copy %d exec entries "
4096                                   "back to user (%d)\n",
4097                                   args->buffer_count, ret);
4098                 }
4099         }
4100
4101         drm_free_large(exec_list);
4102         drm_free_large(exec2_list);
4103         return ret;
4104 }
4105
4106 int
4107 i915_gem_execbuffer2(struct drm_device *dev, void *data,
4108                      struct drm_file *file_priv)
4109 {
4110         struct drm_i915_gem_execbuffer2 *args = data;
4111         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4112         int ret;
4113
4114 #if WATCH_EXEC
4115         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4116                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4117 #endif
4118
4119         if (args->buffer_count < 1) {
4120                 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4121                 return -EINVAL;
4122         }
4123
4124         exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4125         if (exec2_list == NULL) {
4126                 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4127                           args->buffer_count);
4128                 return -ENOMEM;
4129         }
4130         ret = copy_from_user(exec2_list,
4131                              (struct drm_i915_relocation_entry __user *)
4132                              (uintptr_t) args->buffers_ptr,
4133                              sizeof(*exec2_list) * args->buffer_count);
4134         if (ret != 0) {
4135                 DRM_ERROR("copy %d exec entries failed %d\n",
4136                           args->buffer_count, ret);
4137                 drm_free_large(exec2_list);
4138                 return -EFAULT;
4139         }
4140
4141         ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4142         if (!ret) {
4143                 /* Copy the new buffer offsets back to the user's exec list. */
4144                 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4145                                    (uintptr_t) args->buffers_ptr,
4146                                    exec2_list,
4147                                    sizeof(*exec2_list) * args->buffer_count);
4148                 if (ret) {
4149                         ret = -EFAULT;
4150                         DRM_ERROR("failed to copy %d exec entries "
4151                                   "back to user (%d)\n",
4152                                   args->buffer_count, ret);
4153                 }
4154         }
4155
4156         drm_free_large(exec2_list);
4157         return ret;
4158 }
4159
4160 int
4161 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4162 {
4163         struct drm_device *dev = obj->dev;
4164         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4165         int ret;
4166
4167         BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
4168
4169         i915_verify_inactive(dev, __FILE__, __LINE__);
4170
4171         if (obj_priv->gtt_space != NULL) {
4172                 if (alignment == 0)
4173                         alignment = i915_gem_get_gtt_alignment(obj);
4174                 if (obj_priv->gtt_offset & (alignment - 1)) {
4175                         ret = i915_gem_object_unbind(obj);
4176                         if (ret)
4177                                 return ret;
4178                 }
4179         }
4180
4181         if (obj_priv->gtt_space == NULL) {
4182                 ret = i915_gem_object_bind_to_gtt(obj, alignment);
4183                 if (ret)
4184                         return ret;
4185         }
4186
4187         obj_priv->pin_count++;
4188
4189         /* If the object is not active and not pending a flush,
4190          * remove it from the inactive list
4191          */
4192         if (obj_priv->pin_count == 1) {
4193                 atomic_inc(&dev->pin_count);
4194                 atomic_add(obj->size, &dev->pin_memory);
4195                 if (!obj_priv->active &&
4196                     (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
4197                     !list_empty(&obj_priv->list))
4198                         list_del_init(&obj_priv->list);
4199         }
4200         i915_verify_inactive(dev, __FILE__, __LINE__);
4201
4202         return 0;
4203 }
4204
4205 void
4206 i915_gem_object_unpin(struct drm_gem_object *obj)
4207 {
4208         struct drm_device *dev = obj->dev;
4209         drm_i915_private_t *dev_priv = dev->dev_private;
4210         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4211
4212         i915_verify_inactive(dev, __FILE__, __LINE__);
4213         obj_priv->pin_count--;
4214         BUG_ON(obj_priv->pin_count < 0);
4215         BUG_ON(obj_priv->gtt_space == NULL);
4216
4217         /* If the object is no longer pinned, and is
4218          * neither active nor being flushed, then stick it on
4219          * the inactive list
4220          */
4221         if (obj_priv->pin_count == 0) {
4222                 if (!obj_priv->active &&
4223                     (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
4224                         list_move_tail(&obj_priv->list,
4225                                        &dev_priv->mm.inactive_list);
4226                 atomic_dec(&dev->pin_count);
4227                 atomic_sub(obj->size, &dev->pin_memory);
4228         }
4229         i915_verify_inactive(dev, __FILE__, __LINE__);
4230 }
4231
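     /*
      * Userspace interface for pinning a buffer into the GTT, used by the X
      * server (which does not yet manage domains).  Purgeable buffers and
      * buffers already pinned by another client are rejected; on success
      * the buffer's GTT offset is returned in args->offset.
      */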
4232 int
4233 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4234                    struct drm_file *file_priv)
4235 {
4236         struct drm_i915_gem_pin *args = data;
4237         struct drm_gem_object *obj;
4238         struct drm_i915_gem_object *obj_priv;
4239         int ret;
4240
4241         mutex_lock(&dev->struct_mutex);
4242
4243         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4244         if (obj == NULL) {
4245                 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
4246                           args->handle);
4247                 mutex_unlock(&dev->struct_mutex);
4248                 return -EBADF;
4249         }
4250         obj_priv = to_intel_bo(obj);
4251
4252         if (obj_priv->madv != I915_MADV_WILLNEED) {
4253                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
4254                 drm_gem_object_unreference(obj);
4255                 mutex_unlock(&dev->struct_mutex);
4256                 return -EINVAL;
4257         }
4258
4259         if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
4260                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
4261                           args->handle);
4262                 drm_gem_object_unreference(obj);
4263                 mutex_unlock(&dev->struct_mutex);
4264                 return -EINVAL;
4265         }
4266
4267         obj_priv->user_pin_count++;
4268         obj_priv->pin_filp = file_priv;
4269         if (obj_priv->user_pin_count == 1) {
4270                 ret = i915_gem_object_pin(obj, args->alignment);
4271                 if (ret != 0) {
4272                         drm_gem_object_unreference(obj);
4273                         mutex_unlock(&dev->struct_mutex);
4274                         return ret;
4275                 }
4276         }
4277
4278         /* XXX - flush the CPU caches for pinned objects
4279          * as the X server doesn't manage domains yet
4280          */
4281         i915_gem_object_flush_cpu_write_domain(obj);
4282         args->offset = obj_priv->gtt_offset;
4283         drm_gem_object_unreference(obj);
4284         mutex_unlock(&dev->struct_mutex);
4285
4286         return 0;
4287 }
4288
4289 int
4290 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4291                      struct drm_file *file_priv)
4292 {
4293         struct drm_i915_gem_pin *args = data;
4294         struct drm_gem_object *obj;
4295         struct drm_i915_gem_object *obj_priv;
4296
4297         mutex_lock(&dev->struct_mutex);
4298
4299         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4300         if (obj == NULL) {
4301                 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
4302                           args->handle);
4303                 mutex_unlock(&dev->struct_mutex);
4304                 return -EBADF;
4305         }
4306
4307         obj_priv = to_intel_bo(obj);
4308         if (obj_priv->pin_filp != file_priv) {
4309                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
4310                           args->handle);
4311                 drm_gem_object_unreference(obj);
4312                 mutex_unlock(&dev->struct_mutex);
4313                 return -EINVAL;
4314         }
4315         obj_priv->user_pin_count--;
4316         if (obj_priv->user_pin_count == 0) {
4317                 obj_priv->pin_filp = NULL;
4318                 i915_gem_object_unpin(obj);
4319         }
4320
4321         drm_gem_object_unreference(obj);
4322         mutex_unlock(&dev->struct_mutex);
4323         return 0;
4324 }
4325
4326 int
4327 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4328                     struct drm_file *file_priv)
4329 {
4330         struct drm_i915_gem_busy *args = data;
4331         struct drm_gem_object *obj;
4332         struct drm_i915_gem_object *obj_priv;
4333         drm_i915_private_t *dev_priv = dev->dev_private;
4334
4335         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4336         if (obj == NULL) {
4337                 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
4338                           args->handle);
4339                 return -EBADF;
4340         }
4341
4342         mutex_lock(&dev->struct_mutex);
4343         /* Update the active list for the hardware's current position.
4344          * Otherwise this only updates on a delayed timer or when irqs are
4345          * actually unmasked, and our working set ends up being larger than
4346          * required.
4347          */
4348         i915_gem_retire_requests(dev, &dev_priv->render_ring);
4349
4350         if (HAS_BSD(dev))
4351                 i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
4352
4353         obj_priv = to_intel_bo(obj);
4354         /* Don't count being on the flushing list against the object being
4355          * done.  Otherwise, a buffer left on the flushing list but not getting
4356          * flushed (because nobody's flushing that domain) won't ever return
4357          * unbusy and get reused by libdrm's bo cache.  The other expected
4358          * consumer of this interface, OpenGL's occlusion queries, also specs
4359          * that the objects get unbusy "eventually" without any interference.
4360          */
4361         args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
4362
4363         drm_gem_object_unreference(obj);
4364         mutex_unlock(&dev->struct_mutex);
4365         return 0;
4366 }
4367
4368 int
4369 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4370                         struct drm_file *file_priv)
4371 {
4372         return i915_gem_ring_throttle(dev, file_priv);
4373 }
4374
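     /*
      * Userspace hint describing whether a buffer's backing storage is
      * still needed (I915_MADV_WILLNEED) or may be reclaimed under memory
      * pressure (I915_MADV_DONTNEED).  Pinned buffers are rejected, and an
      * unbound DONTNEED buffer has its backing pages truncated immediately.
      */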
4375 int
4376 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4377                        struct drm_file *file_priv)
4378 {
4379         struct drm_i915_gem_madvise *args = data;
4380         struct drm_gem_object *obj;
4381         struct drm_i915_gem_object *obj_priv;
4382
4383         switch (args->madv) {
4384         case I915_MADV_DONTNEED:
4385         case I915_MADV_WILLNEED:
4386                 break;
4387         default:
4388                 return -EINVAL;
4389         }
4390
4391         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4392         if (obj == NULL) {
4393                 DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
4394                           args->handle);
4395                 return -EBADF;
4396         }
4397
4398         mutex_lock(&dev->struct_mutex);
4399         obj_priv = to_intel_bo(obj);
4400
4401         if (obj_priv->pin_count) {
4402                 drm_gem_object_unreference(obj);
4403                 mutex_unlock(&dev->struct_mutex);
4404
4405                 DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
4406                 return -EINVAL;
4407         }
4408
4409         if (obj_priv->madv != __I915_MADV_PURGED)
4410                 obj_priv->madv = args->madv;
4411
4412         /* if the object is no longer bound, discard its backing storage */
4413         if (i915_gem_object_is_purgeable(obj_priv) &&
4414             obj_priv->gtt_space == NULL)
4415                 i915_gem_object_truncate(obj);
4416
4417         args->retained = obj_priv->madv != __I915_MADV_PURGED;
4418
4419         drm_gem_object_unreference(obj);
4420         mutex_unlock(&dev->struct_mutex);
4421
4422         return 0;
4423 }
4424
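     /*
      * Allocate a new GEM object of the given size.  The object starts out
      * in the CPU read/write domain with no fence register assigned and its
      * backing storage marked as needed (I915_MADV_WILLNEED).
      */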
4425 struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
4426                                               size_t size)
4427 {
4428         struct drm_i915_gem_object *obj;
4429
4430         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
4431         if (obj == NULL)
4432                 return NULL;
4433
4434         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4435                 kfree(obj);
4436                 return NULL;
4437         }
4438
4439         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4440         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4441
4442         obj->agp_type = AGP_USER_MEMORY;
4443         obj->base.driver_private = NULL;
4444         obj->fence_reg = I915_FENCE_REG_NONE;
4445         INIT_LIST_HEAD(&obj->list);
4446         INIT_LIST_HEAD(&obj->gpu_write_list);
4447         obj->madv = I915_MADV_WILLNEED;
4448
4449         trace_i915_gem_object_create(&obj->base);
4450
4451         return &obj->base;
4452 }
4453
4454 int i915_gem_init_object(struct drm_gem_object *obj)
4455 {
4456         BUG();
4457
4458         return 0;
4459 }
4460
4461 void i915_gem_free_object(struct drm_gem_object *obj)
4462 {
4463         struct drm_device *dev = obj->dev;
4464         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4465
4466         trace_i915_gem_object_destroy(obj);
4467
4468         while (obj_priv->pin_count > 0)
4469                 i915_gem_object_unpin(obj);
4470
4471         if (obj_priv->phys_obj)
4472                 i915_gem_detach_phys_object(dev, obj);
4473
4474         i915_gem_object_unbind(obj);
4475
4476         if (obj_priv->mmap_offset)
4477                 i915_gem_free_mmap_offset(obj);
4478
4479         drm_gem_object_release(obj);
4480
4481         kfree(obj_priv->page_cpu_valid);
4482         kfree(obj_priv->bit_17);
4483         kfree(obj_priv);
4484 }
4485
4486 /** Unbinds all inactive objects. */
4487 static int
4488 i915_gem_evict_from_inactive_list(struct drm_device *dev)
4489 {
4490         drm_i915_private_t *dev_priv = dev->dev_private;
4491
4492         while (!list_empty(&dev_priv->mm.inactive_list)) {
4493                 struct drm_gem_object *obj;
4494                 int ret;
4495
4496                 obj = &list_first_entry(&dev_priv->mm.inactive_list,
4497                                         struct drm_i915_gem_object,
4498                                         list)->base;
4499
4500                 ret = i915_gem_object_unbind(obj);
4501                 if (ret != 0) {
4502                         DRM_ERROR("Error unbinding object: %d\n", ret);
4503                         return ret;
4504                 }
4505         }
4506
4507         return 0;
4508 }
4509
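     /*
      * i915_gem_idle - quiesce the GPU
      *
      * Waits for outstanding rendering to complete, evicts inactive buffers
      * when running under UMS, marks the device suspended and tears down
      * the ring buffers.  Used when switching away from the VT and on
      * driver suspend.
      */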
4510 int
4511 i915_gem_idle(struct drm_device *dev)
4512 {
4513         drm_i915_private_t *dev_priv = dev->dev_private;
4514         int ret;
4515
4516         mutex_lock(&dev->struct_mutex);
4517
4518         if (dev_priv->mm.suspended ||
4519                         (dev_priv->render_ring.gem_object == NULL) ||
4520                         (HAS_BSD(dev) &&
4521                          dev_priv->bsd_ring.gem_object == NULL)) {
4522                 mutex_unlock(&dev->struct_mutex);
4523                 return 0;
4524         }
4525
4526         ret = i915_gpu_idle(dev);
4527         if (ret) {
4528                 mutex_unlock(&dev->struct_mutex);
4529                 return ret;
4530         }
4531
4532         /* Under UMS, be paranoid and evict. */
4533         if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
4534                 ret = i915_gem_evict_from_inactive_list(dev);
4535                 if (ret) {
4536                         mutex_unlock(&dev->struct_mutex);
4537                         return ret;
4538                 }
4539         }
4540
4541         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4542          * We need to replace this with a semaphore, or something.
4543          * And not confound mm.suspended!
4544          */
4545         dev_priv->mm.suspended = 1;
4546         del_timer(&dev_priv->hangcheck_timer);
4547
4548         i915_kernel_lost_context(dev);
4549         i915_gem_cleanup_ringbuffer(dev);
4550
4551         mutex_unlock(&dev->struct_mutex);
4552
4553         /* Cancel the retire work handler, which should be idle now. */
4554         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4555
4556         return 0;
4557 }
4558
4559 /*
4560  * 965+ support PIPE_CONTROL commands, which provide finer grained control
4561  * over cache flushing.
4562  */
4563 static int
4564 i915_gem_init_pipe_control(struct drm_device *dev)
4565 {
4566         drm_i915_private_t *dev_priv = dev->dev_private;
4567         struct drm_gem_object *obj;
4568         struct drm_i915_gem_object *obj_priv;
4569         int ret;
4570
4571         obj = i915_gem_alloc_object(dev, 4096);
4572         if (obj == NULL) {
4573                 DRM_ERROR("Failed to allocate seqno page\n");
4574                 ret = -ENOMEM;
4575                 goto err;
4576         }
4577         obj_priv = to_intel_bo(obj);
4578         obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4579
4580         ret = i915_gem_object_pin(obj, 4096);
4581         if (ret)
4582                 goto err_unref;
4583
4584         dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4585         dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4586         if (dev_priv->seqno_page == NULL) {
4587                 ret = -ENOMEM;
                     goto err_unpin;
             }
4588
4589         dev_priv->seqno_obj = obj;
4590         memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4591
4592         return 0;
4593
4594 err_unpin:
4595         i915_gem_object_unpin(obj);
4596 err_unref:
4597         drm_gem_object_unreference(obj);
4598 err:
4599         return ret;
4600 }
4601
4602
4603 static void
4604 i915_gem_cleanup_pipe_control(struct drm_device *dev)
4605 {
4606         drm_i915_private_t *dev_priv = dev->dev_private;
4607         struct drm_gem_object *obj;
4608         struct drm_i915_gem_object *obj_priv;
4609
4610         obj = dev_priv->seqno_obj;
4611         obj_priv = to_intel_bo(obj);
4612         kunmap(obj_priv->pages[0]);
4613         i915_gem_object_unpin(obj);
4614         drm_gem_object_unreference(obj);
4615         dev_priv->seqno_obj = NULL;
4616
4617         dev_priv->seqno_page = NULL;
4618 }
4619
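     /*
      * Set up the command ring buffers: the render ring, plus the BSD ring
      * on hardware that has one.  On chipsets with PIPE_CONTROL the seqno
      * page is allocated and pinned first.
      */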
4620 int
4621 i915_gem_init_ringbuffer(struct drm_device *dev)
4622 {
4623         drm_i915_private_t *dev_priv = dev->dev_private;
4624         int ret;
4625
4626         dev_priv->render_ring = render_ring;
4627
4628         if (!I915_NEED_GFX_HWS(dev)) {
4629                 dev_priv->render_ring.status_page.page_addr
4630                         = dev_priv->status_page_dmah->vaddr;
4631                 memset(dev_priv->render_ring.status_page.page_addr,
4632                                 0, PAGE_SIZE);
4633         }
4634
4635         if (HAS_PIPE_CONTROL(dev)) {
4636                 ret = i915_gem_init_pipe_control(dev);
4637                 if (ret)
4638                         return ret;
4639         }
4640
4641         ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
4642         if (ret)
4643                 goto cleanup_pipe_control;
4644
4645         if (HAS_BSD(dev)) {
4646                 dev_priv->bsd_ring = bsd_ring;
4647                 ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
4648                 if (ret)
4649                         goto cleanup_render_ring;
4650         }
4651
4652         return 0;
4653
4654 cleanup_render_ring:
4655         intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4656 cleanup_pipe_control:
4657         if (HAS_PIPE_CONTROL(dev))
4658                 i915_gem_cleanup_pipe_control(dev);
4659         return ret;
4660 }
4661
4662 void
4663 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4664 {
4665         drm_i915_private_t *dev_priv = dev->dev_private;
4666
4667         intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4668         if (HAS_BSD(dev))
4669                 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
4670         if (HAS_PIPE_CONTROL(dev))
4671                 i915_gem_cleanup_pipe_control(dev);
4672 }
4673
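     /*
      * Re-enable GEM when userspace acquires the VT under UMS: clear the
      * wedged flag if set, reinitialise the ring buffers and install the
      * interrupt handler.  A no-op under KMS.
      */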
4674 int
4675 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4676                        struct drm_file *file_priv)
4677 {
4678         drm_i915_private_t *dev_priv = dev->dev_private;
4679         int ret;
4680
4681         if (drm_core_check_feature(dev, DRIVER_MODESET))
4682                 return 0;
4683
4684         if (atomic_read(&dev_priv->mm.wedged)) {
4685                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4686                 atomic_set(&dev_priv->mm.wedged, 0);
4687         }
4688
4689         mutex_lock(&dev->struct_mutex);
4690         dev_priv->mm.suspended = 0;
4691
4692         ret = i915_gem_init_ringbuffer(dev);
4693         if (ret != 0) {
4694                 mutex_unlock(&dev->struct_mutex);
4695                 return ret;
4696         }
4697
4698         spin_lock(&dev_priv->mm.active_list_lock);
4699         BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
4700         BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
4701         spin_unlock(&dev_priv->mm.active_list_lock);
4702
4703         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4704         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4705         BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
4706         BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
4707         mutex_unlock(&dev->struct_mutex);
4708
4709         ret = drm_irq_install(dev);
4710         if (ret)
4711                 goto cleanup_ringbuffer;
4712
4713         return 0;
4714
4715 cleanup_ringbuffer:
4716         mutex_lock(&dev->struct_mutex);
4717         i915_gem_cleanup_ringbuffer(dev);
4718         dev_priv->mm.suspended = 1;
4719         mutex_unlock(&dev->struct_mutex);
4720
4721         return ret;
4722 }
4723
4724 int
4725 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4726                        struct drm_file *file_priv)
4727 {
4728         if (drm_core_check_feature(dev, DRIVER_MODESET))
4729                 return 0;
4730
4731         drm_irq_uninstall(dev);
4732         return i915_gem_idle(dev);
4733 }
4734
4735 void
4736 i915_gem_lastclose(struct drm_device *dev)
4737 {
4738         int ret;
4739
4740         if (drm_core_check_feature(dev, DRIVER_MODESET))
4741                 return;
4742
4743         ret = i915_gem_idle(dev);
4744         if (ret)
4745                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4746 }
4747
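     /*
      * One-time GEM initialisation at driver load: set up the
      * memory-management lists, the retire work handler and the shrinker
      * registration, apply the GEN3 ARB C3 workaround, clear the fence
      * registers and detect the bit-6 swizzling mode.
      */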
4748 void
4749 i915_gem_load(struct drm_device *dev)
4750 {
4751         int i;
4752         drm_i915_private_t *dev_priv = dev->dev_private;
4753
4754         spin_lock_init(&dev_priv->mm.active_list_lock);
4755         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4756         INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
4757         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4758         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4759         INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
4760         INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
4761         if (HAS_BSD(dev)) {
4762                 INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
4763                 INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
4764         }
4765         for (i = 0; i < 16; i++)
4766                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4767         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4768                           i915_gem_retire_work_handler);
4769         spin_lock(&shrink_list_lock);
4770         list_add(&dev_priv->mm.shrink_list, &shrink_list);
4771         spin_unlock(&shrink_list_lock);
4772
4773         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4774         if (IS_GEN3(dev)) {
4775                 u32 tmp = I915_READ(MI_ARB_STATE);
4776                 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
4777                         /* arb state is a masked write, so set bit + bit in mask */
4778                         tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
4779                         I915_WRITE(MI_ARB_STATE, tmp);
4780                 }
4781         }
4782
4783         /* Old X drivers will take 0-2 for front, back, depth buffers */
4784         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4785                 dev_priv->fence_reg_start = 3;
4786
4787         if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4788                 dev_priv->num_fence_regs = 16;
4789         else
4790                 dev_priv->num_fence_regs = 8;
4791
4792         /* Initialize fence registers to zero */
4793         if (IS_I965G(dev)) {
4794                 for (i = 0; i < 16; i++)
4795                         I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4796         } else {
4797                 for (i = 0; i < 8; i++)
4798                         I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4799                 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4800                         for (i = 0; i < 8; i++)
4801                                 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4802         }
4803         i915_gem_detect_bit_6_swizzle(dev);
4804         init_waitqueue_head(&dev_priv->pending_flip_queue);
4805 }
4806
4807 /*
4808  * Create a physically contiguous memory object for this object
4809  * e.g. for cursor + overlay regs
4810  */
4811 int i915_gem_init_phys_object(struct drm_device *dev,
4812                               int id, int size)
4813 {
4814         drm_i915_private_t *dev_priv = dev->dev_private;
4815         struct drm_i915_gem_phys_object *phys_obj;
4816         int ret;
4817
4818         if (dev_priv->mm.phys_objs[id - 1] || !size)
4819                 return 0;
4820
4821         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4822         if (!phys_obj)
4823                 return -ENOMEM;
4824
4825         phys_obj->id = id;
4826
4827         phys_obj->handle = drm_pci_alloc(dev, size, 0);
4828         if (!phys_obj->handle) {
4829                 ret = -ENOMEM;
4830                 goto kfree_obj;
4831         }
4832 #ifdef CONFIG_X86
4833         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4834 #endif
4835
4836         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4837
4838         return 0;
4839 kfree_obj:
4840         kfree(phys_obj);
4841         return ret;
4842 }
4843
4844 void i915_gem_free_phys_object(struct drm_device *dev, int id)
4845 {
4846         drm_i915_private_t *dev_priv = dev->dev_private;
4847         struct drm_i915_gem_phys_object *phys_obj;
4848
4849         if (!dev_priv->mm.phys_objs[id - 1])
4850                 return;
4851
4852         phys_obj = dev_priv->mm.phys_objs[id - 1];
4853         if (phys_obj->cur_obj) {
4854                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4855         }
4856
4857 #ifdef CONFIG_X86
4858         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4859 #endif
4860         drm_pci_free(dev, phys_obj->handle);
4861         kfree(phys_obj);
4862         dev_priv->mm.phys_objs[id - 1] = NULL;
4863 }
4864
4865 void i915_gem_free_all_phys_object(struct drm_device *dev)
4866 {
4867         int i;
4868
4869         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4870                 i915_gem_free_phys_object(dev, i);
4871 }
4872
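     /*
      * Copy the contents of the physically contiguous backing store back
      * into the object's shmem pages and sever the association with the
      * phys object.
      */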
4873 void i915_gem_detach_phys_object(struct drm_device *dev,
4874                                  struct drm_gem_object *obj)
4875 {
4876         struct drm_i915_gem_object *obj_priv;
4877         int i;
4878         int ret;
4879         int page_count;
4880
4881         obj_priv = to_intel_bo(obj);
4882         if (!obj_priv->phys_obj)
4883                 return;
4884
4885         ret = i915_gem_object_get_pages(obj, 0);
4886         if (ret)
4887                 goto out;
4888
4889         page_count = obj->size / PAGE_SIZE;
4890
4891         for (i = 0; i < page_count; i++) {
4892                 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
4893                 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4894
4895                 memcpy(dst, src, PAGE_SIZE);
4896                 kunmap_atomic(dst, KM_USER0);
4897         }
4898         drm_clflush_pages(obj_priv->pages, page_count);
4899         drm_agp_chipset_flush(dev);
4900
4901         i915_gem_object_put_pages(obj);
4902 out:
4903         obj_priv->phys_obj->cur_obj = NULL;
4904         obj_priv->phys_obj = NULL;
4905 }
4906
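     /*
      * Back an object with physically contiguous memory (needed e.g. for
      * cursors and overlay registers on older hardware), allocating the
      * phys object slot on first use and copying the current page contents
      * into it.
      */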
4907 int
4908 i915_gem_attach_phys_object(struct drm_device *dev,
4909                             struct drm_gem_object *obj, int id)
4910 {
4911         drm_i915_private_t *dev_priv = dev->dev_private;
4912         struct drm_i915_gem_object *obj_priv;
4913         int ret = 0;
4914         int page_count;
4915         int i;
4916
4917         if (id > I915_MAX_PHYS_OBJECT)
4918                 return -EINVAL;
4919
4920         obj_priv = to_intel_bo(obj);
4921
4922         if (obj_priv->phys_obj) {
4923                 if (obj_priv->phys_obj->id == id)
4924                         return 0;
4925                 i915_gem_detach_phys_object(dev, obj);
4926         }
4927
4928
4929         /* create a new object */
4930         if (!dev_priv->mm.phys_objs[id - 1]) {
4931                 ret = i915_gem_init_phys_object(dev, id,
4932                                                 obj->size);
4933                 if (ret) {
4934                         DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
4935                         goto out;
4936                 }
4937         }
4938
4939         /* bind to the object */
4940         obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4941         obj_priv->phys_obj->cur_obj = obj;
4942
4943         ret = i915_gem_object_get_pages(obj, 0);
4944         if (ret) {
4945                 DRM_ERROR("failed to get page list\n");
4946                 goto out;
4947         }
4948
4949         page_count = obj->size / PAGE_SIZE;
4950
4951         for (i = 0; i < page_count; i++) {
4952                 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
4953                 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4954
4955                 memcpy(dst, src, PAGE_SIZE);
4956                 kunmap_atomic(src, KM_USER0);
4957         }
4958
4959         i915_gem_object_put_pages(obj);
4960
4961         return 0;
4962 out:
4963         return ret;
4964 }
4965
4966 static int
4967 i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4968                      struct drm_i915_gem_pwrite *args,
4969                      struct drm_file *file_priv)
4970 {
4971         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4972         void *obj_addr;
4973         int ret;
4974         char __user *user_data;
4975
4976         user_data = (char __user *) (uintptr_t) args->data_ptr;
4977         obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4978
4979         DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
4980         ret = copy_from_user(obj_addr, user_data, args->size);
4981         if (ret)
4982                 return -EFAULT;
4983
4984         drm_agp_chipset_flush(dev);
4985         return 0;
4986 }
4987
4988 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
4989 {
4990         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
4991
4992         /* Clean up our request list when the client is going away, so that
4993          * later retire_requests won't dereference our soon-to-be-gone
4994          * file_priv.
4995          */
4996         mutex_lock(&dev->struct_mutex);
4997         while (!list_empty(&i915_file_priv->mm.request_list))
4998                 list_del_init(i915_file_priv->mm.request_list.next);
4999         mutex_unlock(&dev->struct_mutex);
5000 }
5001
5002 static int
5003 i915_gpu_is_active(struct drm_device *dev)
5004 {
5005         drm_i915_private_t *dev_priv = dev->dev_private;
5006         int lists_empty;
5007
5008         spin_lock(&dev_priv->mm.active_list_lock);
5009         lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
5010                       list_empty(&dev_priv->render_ring.active_list);
5011         if (HAS_BSD(dev))
5012                 lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
5013         spin_unlock(&dev_priv->mm.active_list_lock);
5014
5015         return !lists_empty;
5016 }
5017
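     /*
      * Memory shrinker callback.  When nr_to_scan is zero it only reports
      * the number of inactive objects available for reclaim; otherwise it
      * unbinds purgeable buffers first, then any remaining inactive
      * buffers, and as a last resort idles the GPU and rescans.
      */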
5018 static int
5019 i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
5020 {
5021         drm_i915_private_t *dev_priv, *next_dev;
5022         struct drm_i915_gem_object *obj_priv, *next_obj;
5023         int cnt = 0;
5024         int would_deadlock = 1;
5025
5026         /* "fast-path" to count number of available objects */
5027         if (nr_to_scan == 0) {
5028                 spin_lock(&shrink_list_lock);
5029                 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
5030                         struct drm_device *dev = dev_priv->dev;
5031
5032                         if (mutex_trylock(&dev->struct_mutex)) {
5033                                 list_for_each_entry(obj_priv,
5034                                                     &dev_priv->mm.inactive_list,
5035                                                     list)
5036                                         cnt++;
5037                                 mutex_unlock(&dev->struct_mutex);
5038                         }
5039                 }
5040                 spin_unlock(&shrink_list_lock);
5041
5042                 return (cnt / 100) * sysctl_vfs_cache_pressure;
5043         }
5044
5045         spin_lock(&shrink_list_lock);
5046
5047 rescan:
5048         /* first scan for clean buffers */
5049         list_for_each_entry_safe(dev_priv, next_dev,
5050                                  &shrink_list, mm.shrink_list) {
5051                 struct drm_device *dev = dev_priv->dev;
5052
5053                 if (!mutex_trylock(&dev->struct_mutex))
5054                         continue;
5055
5056                 spin_unlock(&shrink_list_lock);
5057                 i915_gem_retire_requests(dev, &dev_priv->render_ring);
5058
5059                 if (HAS_BSD(dev))
5060                         i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
5061
5062                 list_for_each_entry_safe(obj_priv, next_obj,
5063                                          &dev_priv->mm.inactive_list,
5064                                          list) {
5065                         if (i915_gem_object_is_purgeable(obj_priv)) {
5066                                 i915_gem_object_unbind(&obj_priv->base);
5067                                 if (--nr_to_scan <= 0)
5068                                         break;
5069                         }
5070                 }
5071
5072                 spin_lock(&shrink_list_lock);
5073                 mutex_unlock(&dev->struct_mutex);
5074
5075                 would_deadlock = 0;
5076
5077                 if (nr_to_scan <= 0)
5078                         break;
5079         }
5080
5081         /* second pass, evict/count anything still on the inactive list */
5082         list_for_each_entry_safe(dev_priv, next_dev,
5083                                  &shrink_list, mm.shrink_list) {
5084                 struct drm_device *dev = dev_priv->dev;
5085
5086                 if (!mutex_trylock(&dev->struct_mutex))
5087                         continue;
5088
5089                 spin_unlock(&shrink_list_lock);
5090
5091                 list_for_each_entry_safe(obj_priv, next_obj,
5092                                          &dev_priv->mm.inactive_list,
5093                                          list) {
5094                         if (nr_to_scan > 0) {
5095                                 i915_gem_object_unbind(&obj_priv->base);
5096                                 nr_to_scan--;
5097                         } else
5098                                 cnt++;
5099                 }
5100
5101                 spin_lock(&shrink_list_lock);
5102                 mutex_unlock(&dev->struct_mutex);
5103
5104                 would_deadlock = 0;
5105         }
5106
5107         if (nr_to_scan) {
5108                 int active = 0;
5109
5110                 /*
5111                  * We are desperate for pages, so as a last resort, wait
5112                  * for the GPU to finish and discard whatever we can.
5113                  * This has a dramatic impact to reduce the number of
5114                  * OOM-killer events whilst running the GPU aggressively.
5115                  */
5116                 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
5117                         struct drm_device *dev = dev_priv->dev;
5118
5119                         if (!mutex_trylock(&dev->struct_mutex))
5120                                 continue;
5121
5122                         spin_unlock(&shrink_list_lock);
5123
5124                         if (i915_gpu_is_active(dev)) {
5125                                 i915_gpu_idle(dev);
5126                                 active++;
5127                         }
5128
5129                         spin_lock(&shrink_list_lock);
5130                         mutex_unlock(&dev->struct_mutex);
5131                 }
5132
5133                 if (active)
5134                         goto rescan;
5135         }
5136
5137         spin_unlock(&shrink_list_lock);
5138
5139         if (would_deadlock)
5140                 return -1;
5141         else if (cnt > 0)
5142                 return (cnt / 100) * sysctl_vfs_cache_pressure;
5143         else
5144                 return 0;
5145 }
5146
5147 static struct shrinker shrinker = {
5148         .shrink = i915_gem_shrink,
5149         .seeks = DEFAULT_SEEKS,
5150 };
5151
5152 __init void
5153 i915_gem_shrinker_init(void)
5154 {
5155         register_shrinker(&shrinker);
5156 }
5157
5158 __exit void
5159 i915_gem_shrinker_exit(void)
5160 {
5161         unregister_shrinker(&shrinker);
5162 }