/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>

#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev, int min_size);
static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);

int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}
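
/*
 * Illustrative userspace sketch (editorial note, not part of this file):
 * creating a buffer and receiving its handle through the ioctl above.
 * use_handle() is a hypothetical helper.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use_handle(create.handle);
 */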

static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char __iomem *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;

	return 0;
}
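
/*
 * Editorial note: the atomic kmap above means the copy runs with page faults
 * disabled, so __copy_to_user_inatomic() returns nonzero (bytes not copied)
 * whenever the destination page isn't resident. Callers treat the resulting
 * -EFAULT as "retry via the slow path" rather than as a hard error.
 */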

static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}

static inline int
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_vaddr == NULL)
		return -ENOMEM;

	src_vaddr = kmap_atomic(src_page, KM_USER1);
	if (src_vaddr == NULL) {
		kunmap_atomic(dst_vaddr, KM_USER0);
		return -ENOMEM;
	}

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap_atomic(src_vaddr, KM_USER1);
	kunmap_atomic(dst_vaddr, KM_USER0);

	return 0;
}

static inline int
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
	if (gpu_vaddr == NULL)
		return -ENOMEM;

	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
	if (cpu_vaddr == NULL) {
		kunmap_atomic(gpu_vaddr, KM_USER0);
		return -ENOMEM;
	}

	/* Copy the data, XORing A6 with A17. The caller already handles the
	 * XOR with the other address bits (A9 for Y, A9 and A10 for X).
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap_atomic(cpu_vaddr, KM_USER1);
	kunmap_atomic(gpu_vaddr, KM_USER0);

	return 0;
}
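
/*
 * Illustrative note (editorial): with bit-17 swizzling the hardware XORs
 * address bit 6 with bit 17 of the physical address, so within an affected
 * 4KB page each 64-byte cacheline swaps with its neighbour. For example,
 * bytes at page offset 0x080 are fetched by the GPU from offset 0x0C0 and
 * vice versa, which is why the loop above copies in cacheline-sized runs
 * from gpu_offset ^ 64.
 */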

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space. On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
323
07f73f69
CW
324static int
325i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
326{
327 int ret;
328
4bdadb97 329 ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
07f73f69
CW
330
331 /* If we've insufficient memory to map in the pages, attempt
332 * to make some space by throwing out some old buffers.
333 */
334 if (ret == -ENOMEM) {
335 struct drm_device *dev = obj->dev;
07f73f69
CW
336
337 ret = i915_gem_evict_something(dev, obj->size);
338 if (ret)
339 return ret;
340
4bdadb97 341 ret = i915_gem_object_get_pages(obj, 0);
07f73f69
CW
342 }
343
344 return ret;
345}

/**
 * This is the fallback shmem pread path, which pins the destination pages
 * with get_user_pages() up front, so that we can copy out of the object's
 * backing pages while holding the struct_mutex without taking page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    1);
		} else {
			ret = slow_shmem_copy(user_pages[data_page_index],
					      data_page_offset,
					      obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

	drm_gem_object_unreference(obj);

	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}
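
/*
 * Editorial note: the write-combining mapping above is atomic, so the copy
 * must not sleep or fault; __copy_from_user_inatomic_nocache() also avoids
 * polluting the CPU cache with data destined for the uncached GTT. When the
 * user's source page isn't resident the copy fails with -EFAULT and the
 * ioctl falls back to i915_gem_gtt_pwrite_slow(), which pins the source
 * pages before copying.
 */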

/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char *src_vaddr, *dst_vaddr;
	unsigned long unwritten;

	dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
	src_vaddr = kmap_atomic(user_page, KM_USER1);
	unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
						      src_vaddr + user_offset,
						      length);
	kunmap_atomic(src_vaddr, KM_USER1);
	io_mapping_unmap_atomic(dst_vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}

static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				      page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
					gtt_page_base, gtt_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);

		/* The source pages are pinned, so a copy failure here is
		 * unexpected; there is no further fallback, so just return
		 * the error.
		 */
		if (ret)
			goto out_unpin_object;

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    0);
		} else {
			ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference(obj);

	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	mutex_lock(&dev->struct_mutex);

	intel_mark_busy(dev, obj);

#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
#endif
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Update the LRU on the fence for the CPU access that's
		 * about to occur.
		 */
		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
			list_move_tail(&obj_priv->fence_list,
				       &dev_priv->mm.fence_list);
		}

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
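
/*
 * Illustrative userspace sketch (editorial note, not part of this file):
 * before writing through a GTT mapping, a client would typically move the
 * object to the GTT domain for both reads and writes.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */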

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret)
			goto unlock;

		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	/* Need a new fence register? */
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret)
			goto unlock;
	}

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case 0:
	case -ERESTARTSYS:
		/* NOPAGE lets the kernel deliver any pending signal and
		 * retry the fault, so an interrupted wait is transparent
		 * to userspace.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("failed to add to map hash\n");
		ret = -ENOMEM;
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);

	return ret;
}
1292
901782b2
CW
1293/**
1294 * i915_gem_release_mmap - remove physical page mappings
1295 * @obj: obj in question
1296 *
af901ca1 1297 * Preserve the reservation of the mmapping with the DRM core code, but
901782b2
CW
1298 * relinquish ownership of the pages back to the system.
1299 *
1300 * It is vital that we remove the page mapping if we have mapped a tiled
1301 * object through the GTT and then lose the fence register due to
1302 * resource pressure. Similarly if the object has been moved out of the
1303 * aperture, than pages mapped into userspace must be revoked. Removing the
1304 * mapping will then trigger a page fault on the next user access, allowing
1305 * fixup by i915_gem_fault().
1306 */
d05ca301 1307void
901782b2
CW
1308i915_gem_release_mmap(struct drm_gem_object *obj)
1309{
1310 struct drm_device *dev = obj->dev;
1311 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1312
1313 if (dev->dev_mapping)
1314 unmap_mapping_range(dev->dev_mapping,
1315 obj_priv->mmap_offset, obj->size, 1);
1316}

static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	if (list->map) {
		kfree(list->map);
		list->map = NULL;
	}

	obj_priv->mmap_offset = 0;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (IS_I9XX(dev))
		start = 1024*1024;
	else
		start = 512*1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}
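
/*
 * Worked example (editorial): a 3MB tiled object on an i9xx part starts the
 * loop above at 1MB and doubles until the value covers the object, giving a
 * 4MB alignment; an untiled object, or any object on i965, simply gets the
 * 4KB page alignment.
 */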

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);

	obj_priv = obj->driver_private;

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	args->offset = obj_priv->mmap_offset;

	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
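
/*
 * Illustrative userspace sketch (editorial note, not part of this file):
 * the fake offset returned above is fed straight into mmap(2) on the DRM fd.
 *
 *	struct drm_i915_gem_mmap_gtt mg = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mg);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, mg.offset);
 */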

void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count = obj->size / PAGE_SIZE;
	int i;

	BUG_ON(obj_priv->pages_refcount == 0);
	BUG_ON(obj_priv->madv == __I915_MADV_PURGED);

	if (--obj_priv->pages_refcount != 0)
		return;

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj_priv->madv == I915_MADV_DONTNEED)
		obj_priv->dirty = 0;

	for (i = 0; i < page_count; i++) {
		if (obj_priv->pages[i] == NULL)
			break;

		if (obj_priv->dirty)
			set_page_dirty(obj_priv->pages[i]);

		if (obj_priv->madv == I915_MADV_WILLNEED)
			mark_page_accessed(obj_priv->pages[i]);

		page_cache_release(obj_priv->pages[i]);
	}
	obj_priv->dirty = 0;

	drm_free_large(obj_priv->pages);
	obj_priv->pages = NULL;
}

static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	spin_lock(&dev_priv->mm.active_list_lock);
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);
	obj_priv->last_rendering_seqno = seqno;
}

static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
	obj_priv->last_rendering_seqno = 0;
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct inode *inode;

	inode = obj->filp->f_path.dentry->d_inode;
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);

	obj_priv->madv = __I915_MADV_PURGED;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
{
	return obj_priv->madv == I915_MADV_DONTNEED;
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	obj_priv->last_rendering_seqno = 0;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
		 uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = NULL;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;
	RING_LOCALS;

	if (file_priv != NULL)
		i915_file_priv = file_priv->driver_priv;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
		return 0;

	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);

	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG_DRIVER("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);
	if (i915_file_priv) {
		list_add_tail(&request->client_list,
			      &i915_file_priv->mm.request_list);
	} else {
		INIT_LIST_HEAD(&request->client_list);
	}

	/* Associate any objects on the flushing list matching the write
	 * domain we're flushing with our flush.
	 */
	if (flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		list_for_each_entry_safe(obj_priv, next,
					 &dev_priv->mm.flushing_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if ((obj->write_domain & flush_domains) ==
			    obj->write_domain) {
				uint32_t old_write_domain = obj->write_domain;

				obj->write_domain = 0;
				i915_gem_object_move_to_active(obj, seqno);

				trace_i915_gem_object_change_domain(obj,
								    obj->read_domains,
								    old_write_domain);
			}
		}
	}

	if (!dev_priv->mm.suspended) {
		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
		if (was_empty)
			queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	}
	return seqno;
}

/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}

/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	trace_i915_gem_request_retire(dev, request->seqno);

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	spin_lock(&dev_priv->mm.active_list_lock);
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this seqno.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			goto out;

#if WATCH_LRU
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);
#endif

		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else {
			/* Take a reference on the object so it won't be
			 * freed while the spinlock is held. The list
			 * protection for this spinlock is safe when breaking
			 * the lock like this since the next thing we do
			 * is just get the head of the list again.
			 */
			drm_gem_object_reference(obj);
			i915_gem_object_move_to_inactive(obj);
			spin_unlock(&dev_priv->mm.active_list_lock);
			drm_gem_object_unreference(obj);
			spin_lock(&dev_priv->mm.active_list_lock);
		}
	}
out:
	spin_unlock(&dev_priv->mm.active_list_lock);
}

/**
 * Returns true if seq1 is later than seq2.
 */
bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
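
/*
 * Worked example (editorial): the signed subtraction makes the comparison
 * robust across 32-bit wraparound. With seq1 = 2 and seq2 = 0xfffffffe,
 * (int32_t)(2 - 0xfffffffe) evaluates to 4, which is >= 0, so seq1 is
 * correctly treated as later even though it is numerically smaller.
 */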

uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
		return;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    atomic_read(&dev_priv->mm.wedged)) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			list_del(&request->client_list);
			kfree(request);
		} else
			break;
	}

	if (unlikely (dev_priv->trace_irq_seqno &&
		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
		i915_user_irq_put(dev);
		dev_priv->trace_irq_seqno = 0;
	}
}

void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!dev_priv->mm.suspended &&
	    !list_empty(&dev_priv->mm.request_list))
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}
1807
5a5a0c64 1808int
48764bf4 1809i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
673a394b
EA
1810{
1811 drm_i915_private_t *dev_priv = dev->dev_private;
802c7eb6 1812 u32 ier;
673a394b
EA
1813 int ret = 0;
1814
1815 BUG_ON(seqno == 0);
1816
ba1234d1 1817 if (atomic_read(&dev_priv->mm.wedged))
ffed1d09
BG
1818 return -EIO;
1819
673a394b 1820 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
f2b115e6 1821 if (IS_IRONLAKE(dev))
036a4a7d
ZW
1822 ier = I915_READ(DEIER) | I915_READ(GTIER);
1823 else
1824 ier = I915_READ(IER);
802c7eb6
JB
1825 if (!ier) {
1826 DRM_ERROR("something (likely vbetool) disabled "
1827 "interrupts, re-enabling\n");
1828 i915_driver_irq_preinstall(dev);
1829 i915_driver_irq_postinstall(dev);
1830 }
1831
1c5d22f7
CW
1832 trace_i915_gem_request_wait_begin(dev, seqno);
1833
673a394b
EA
1834 dev_priv->mm.waiting_gem_seqno = seqno;
1835 i915_user_irq_get(dev);
48764bf4
DV
1836 if (interruptible)
1837 ret = wait_event_interruptible(dev_priv->irq_queue,
1838 i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
1839 atomic_read(&dev_priv->mm.wedged));
1840 else
1841 wait_event(dev_priv->irq_queue,
1842 i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
1843 atomic_read(&dev_priv->mm.wedged));
1844
673a394b
EA
1845 i915_user_irq_put(dev);
1846 dev_priv->mm.waiting_gem_seqno = 0;
1c5d22f7
CW
1847
1848 trace_i915_gem_request_wait_end(dev, seqno);
673a394b 1849 }
ba1234d1 1850 if (atomic_read(&dev_priv->mm.wedged))
673a394b
EA
1851 ret = -EIO;
1852
1853 if (ret && ret != -ERESTARTSYS)
1854 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1855 __func__, ret, seqno, i915_get_gem_seqno(dev));
1856
1857 /* Directly dispatch request retiring. While we have the work queue
1858 * to handle this, the waiter on a request often wants an associated
1859 * buffer to have made it to the inactive list, and we would need
1860 * a separate wait queue to handle that.
1861 */
1862 if (ret == 0)
1863 i915_gem_retire_requests(dev);
1864
1865 return ret;
1866}
1867
48764bf4
DV
1868/**
1869 * Waits for a sequence number to be signaled, and cleans up the
1870 * request and object lists appropriately for that event.
1871 */
1872static int
1873i915_wait_request(struct drm_device *dev, uint32_t seqno)
1874{
1875 return i915_do_wait_request(dev, seqno, 1);
1876}
1877
673a394b
EA
1878static void
1879i915_gem_flush(struct drm_device *dev,
1880 uint32_t invalidate_domains,
1881 uint32_t flush_domains)
1882{
1883 drm_i915_private_t *dev_priv = dev->dev_private;
1884 uint32_t cmd;
1885 RING_LOCALS;
1886
1887#if WATCH_EXEC
1888 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1889 invalidate_domains, flush_domains);
1890#endif
1c5d22f7
CW
1891 trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
1892 invalidate_domains, flush_domains);
673a394b
EA
1893
1894 if (flush_domains & I915_GEM_DOMAIN_CPU)
1895 drm_agp_chipset_flush(dev);
1896
21d509e3 1897 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
673a394b
EA
1898 /*
1899 * read/write caches:
1900 *
1901 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
1902 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
1903 * also flushed at 2d versus 3d pipeline switches.
1904 *
1905 * read-only caches:
1906 *
1907 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
1908 * MI_READ_FLUSH is set, and is always flushed on 965.
1909 *
1910 * I915_GEM_DOMAIN_COMMAND may not exist?
1911 *
1912 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1913 * invalidated when MI_EXE_FLUSH is set.
1914 *
1915 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1916 * invalidated with every MI_FLUSH.
1917 *
1918 * TLBs:
1919 *
1920 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
1921 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
1922 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1923 * are flushed at any MI_FLUSH.
1924 */
1925
1926 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1927 if ((invalidate_domains|flush_domains) &
1928 I915_GEM_DOMAIN_RENDER)
1929 cmd &= ~MI_NO_WRITE_FLUSH;
1930 if (!IS_I965G(dev)) {
1931 /*
1932 * On the 965, the sampler cache always gets flushed
1933 * and this bit is reserved.
1934 */
1935 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1936 cmd |= MI_READ_FLUSH;
1937 }
1938 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1939 cmd |= MI_EXE_FLUSH;
1940
1941#if WATCH_EXEC
1942 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
1943#endif
1944 BEGIN_LP_RING(2);
1945 OUT_RING(cmd);
48764bf4 1946 OUT_RING(MI_NOOP);
673a394b
EA
1947 ADVANCE_LP_RING();
1948 }
1949}
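/* How the command word above comes together for one hypothetical call,
 * e.g. i915_gem_flush(dev, I915_GEM_DOMAIN_SAMPLER, I915_GEM_DOMAIN_RENDER)
 * on a pre-965 part: cmd starts as MI_FLUSH | MI_NO_WRITE_FLUSH,
 * MI_NO_WRITE_FLUSH is cleared because RENDER is among the domains, and
 * MI_READ_FLUSH is added for the sampler invalidate, so the ring
 * receives cmd = MI_FLUSH | MI_READ_FLUSH followed by an MI_NOOP pad.
 */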
1950
1951/**
1952 * Ensures that all rendering to the object has completed and the object is
1953 * safe to unbind from the GTT or access from the CPU.
1954 */
1955static int
1956i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1957{
1958 struct drm_device *dev = obj->dev;
1959 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1960 int ret;
1961
e47c68e9
EA
1962 /* This function only exists to support waiting for existing rendering,
1963 * not for emitting required flushes.
673a394b 1964 */
e47c68e9 1965 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
673a394b
EA
1966
1967 /* If there is rendering queued on the buffer being evicted, wait for
1968 * it.
1969 */
1970 if (obj_priv->active) {
1971#if WATCH_BUF
1972 DRM_INFO("%s: object %p wait for seqno %08x\n",
1973 __func__, obj, obj_priv->last_rendering_seqno);
1974#endif
1975 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
1976 if (ret != 0)
1977 return ret;
1978 }
1979
1980 return 0;
1981}
1982
1983/**
1984 * Unbinds an object from the GTT aperture.
1985 */
0f973f27 1986int
673a394b
EA
1987i915_gem_object_unbind(struct drm_gem_object *obj)
1988{
1989 struct drm_device *dev = obj->dev;
1990 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1991 int ret = 0;
1992
1993#if WATCH_BUF
1994 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1995 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1996#endif
1997 if (obj_priv->gtt_space == NULL)
1998 return 0;
1999
2000 if (obj_priv->pin_count != 0) {
2001 DRM_ERROR("Attempting to unbind pinned buffer\n");
2002 return -EINVAL;
2003 }
2004
5323fd04
EA
2005 /* blow away mappings if mapped through GTT */
2006 i915_gem_release_mmap(obj);
2007
673a394b
EA
2008 /* Move the object to the CPU domain to ensure that
2009 * any possible CPU writes while it's not in the GTT
2010 * are flushed when we go to remap it. This will
2011 * also ensure that all pending GPU writes are finished
2012 * before we unbind.
2013 */
e47c68e9 2014 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
673a394b 2015 if (ret) {
e47c68e9
EA
2016 if (ret != -ERESTARTSYS)
2017 DRM_ERROR("set_domain failed: %d\n", ret);
673a394b
EA
2018 return ret;
2019 }
2020
5323fd04
EA
2021 BUG_ON(obj_priv->active);
2022
96b47b65
DV
2023 /* release the fence reg _after_ flushing */
2024 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2025 i915_gem_clear_fence_reg(obj);
2026
673a394b
EA
2027 if (obj_priv->agp_mem != NULL) {
2028 drm_unbind_agp(obj_priv->agp_mem);
2029 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
2030 obj_priv->agp_mem = NULL;
2031 }
2032
856fa198 2033 i915_gem_object_put_pages(obj);
a32808c0 2034 BUG_ON(obj_priv->pages_refcount);
673a394b
EA
2035
2036 if (obj_priv->gtt_space) {
2037 atomic_dec(&dev->gtt_count);
2038 atomic_sub(obj->size, &dev->gtt_memory);
2039
2040 drm_mm_put_block(obj_priv->gtt_space);
2041 obj_priv->gtt_space = NULL;
2042 }
2043
2044 /* Remove ourselves from the LRU list if present. */
2045 if (!list_empty(&obj_priv->list))
2046 list_del_init(&obj_priv->list);
2047
963b4836
CW
2048 if (i915_gem_object_is_purgeable(obj_priv))
2049 i915_gem_object_truncate(obj);
2050
1c5d22f7
CW
2051 trace_i915_gem_object_unbind(obj);
2052
673a394b
EA
2053 return 0;
2054}
2055
07f73f69
CW
2056static struct drm_gem_object *
2057i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
2058{
2059 drm_i915_private_t *dev_priv = dev->dev_private;
2060 struct drm_i915_gem_object *obj_priv;
2061 struct drm_gem_object *best = NULL;
2062 struct drm_gem_object *first = NULL;
2063
2064 /* Try to find the smallest clean object */
2065 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
2066 struct drm_gem_object *obj = obj_priv->obj;
2067 if (obj->size >= min_size) {
963b4836
CW
2068 if ((!obj_priv->dirty ||
2069 i915_gem_object_is_purgeable(obj_priv)) &&
07f73f69
CW
2070 (!best || obj->size < best->size)) {
2071 best = obj;
2072 if (best->size == min_size)
2073 return best;
2074 }
2075 if (!first)
2076 first = obj;
2077 }
2078 }
2079
2080 return best ? best : first;
2081}
2082
673a394b 2083static int
07f73f69
CW
2084i915_gem_evict_everything(struct drm_device *dev)
2085{
2086 drm_i915_private_t *dev_priv = dev->dev_private;
2087 uint32_t seqno;
2088 int ret;
2089 bool lists_empty;
2090
07f73f69
CW
2091 spin_lock(&dev_priv->mm.active_list_lock);
2092 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2093 list_empty(&dev_priv->mm.flushing_list) &&
2094 list_empty(&dev_priv->mm.active_list));
2095 spin_unlock(&dev_priv->mm.active_list_lock);
2096
9731129c 2097 if (lists_empty)
07f73f69 2098 return -ENOSPC;
07f73f69
CW
2099
2100 /* Flush everything (on to the inactive lists) and evict */
2101 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2102 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
2103 if (seqno == 0)
2104 return -ENOMEM;
2105
2106 ret = i915_wait_request(dev, seqno);
2107 if (ret)
2108 return ret;
2109
ab5ee576 2110 ret = i915_gem_evict_from_inactive_list(dev);
07f73f69
CW
2111 if (ret)
2112 return ret;
2113
2114 spin_lock(&dev_priv->mm.active_list_lock);
2115 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2116 list_empty(&dev_priv->mm.flushing_list) &&
2117 list_empty(&dev_priv->mm.active_list));
2118 spin_unlock(&dev_priv->mm.active_list_lock);
2119 BUG_ON(!lists_empty);
2120
2121 return 0;
2122}
2123
673a394b 2124static int
07f73f69 2125i915_gem_evict_something(struct drm_device *dev, int min_size)
673a394b
EA
2126{
2127 drm_i915_private_t *dev_priv = dev->dev_private;
2128 struct drm_gem_object *obj;
07f73f69 2129 int ret;
673a394b
EA
2130
2131 for (;;) {
07f73f69
CW
2132 i915_gem_retire_requests(dev);
2133
673a394b
EA
2134 /* If there's an inactive buffer available now, grab it
2135 * and be done.
2136 */
07f73f69
CW
2137 obj = i915_gem_find_inactive_object(dev, min_size);
2138 if (obj) {
2139 struct drm_i915_gem_object *obj_priv;
2140
673a394b
EA
2141#if WATCH_LRU
2142 DRM_INFO("%s: evicting %p\n", __func__, obj);
2143#endif
07f73f69
CW
2144 obj_priv = obj->driver_private;
2145 BUG_ON(obj_priv->pin_count != 0);
673a394b
EA
2146 BUG_ON(obj_priv->active);
2147
2148 /* Wait on the rendering and unbind the buffer. */
07f73f69 2149 return i915_gem_object_unbind(obj);
673a394b
EA
2150 }
2151
2152 /* If we didn't get anything, but the ring is still processing
07f73f69
CW
2153 * things, wait for the next to finish and hopefully leave us
2154 * a buffer to evict.
673a394b
EA
2155 */
2156 if (!list_empty(&dev_priv->mm.request_list)) {
2157 struct drm_i915_gem_request *request;
2158
2159 request = list_first_entry(&dev_priv->mm.request_list,
2160 struct drm_i915_gem_request,
2161 list);
2162
2163 ret = i915_wait_request(dev, request->seqno);
2164 if (ret)
07f73f69 2165 return ret;
673a394b 2166
07f73f69 2167 continue;
673a394b
EA
2168 }
2169
2170 /* If we didn't have anything on the request list but there
2171 * are buffers awaiting a flush, emit one and try again.
2172 * When we wait on it, those buffers waiting for that flush
2173 * will get moved to inactive.
2174 */
2175 if (!list_empty(&dev_priv->mm.flushing_list)) {
07f73f69 2176 struct drm_i915_gem_object *obj_priv;
673a394b 2177
9a1e2582
CW
2178 /* Find an object that we can immediately reuse */
2179 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
2180 obj = obj_priv->obj;
2181 if (obj->size >= min_size)
2182 break;
673a394b 2183
9a1e2582
CW
2184 obj = NULL;
2185 }
673a394b 2186
9a1e2582
CW
2187 if (obj != NULL) {
2188 uint32_t seqno;
673a394b 2189
9a1e2582
CW
2190 i915_gem_flush(dev,
2191 obj->write_domain,
2192 obj->write_domain);
2193 seqno = i915_add_request(dev, NULL, obj->write_domain);
2194 if (seqno == 0)
2195 return -ENOMEM;
ac94a962 2196
9a1e2582
CW
2197 ret = i915_wait_request(dev, seqno);
2198 if (ret)
2199 return ret;
2200
2201 continue;
2202 }
673a394b
EA
2203 }
2204
07f73f69
CW
2205 /* If we didn't do any of the above, there's no single buffer
2206 * large enough to swap out for the new one, so just evict
2207 * everything and start again. (This should be rare.)
673a394b 2208 */
9731129c 2209 if (!list_empty(&dev_priv->mm.inactive_list))
ab5ee576 2210 return i915_gem_evict_from_inactive_list(dev);
9731129c 2211 else
07f73f69 2212 return i915_gem_evict_everything(dev);
ac94a962 2213 }
ac94a962
KP
2214}
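/* Summary of the eviction strategy above, in decreasing order of
 * preference: (1) reuse a clean or purgeable inactive buffer of
 * sufficient size, (2) wait on the oldest outstanding request so its
 * buffers fall to the inactive list, (3) flush the flushing list and
 * wait for a suitably sized buffer from it, and only then (4) fall
 * back to emptying the inactive list or evicting everything.
 */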
2215
6911a9b8 2216int
4bdadb97
CW
2217i915_gem_object_get_pages(struct drm_gem_object *obj,
2218 gfp_t gfpmask)
673a394b
EA
2219{
2220 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2221 int page_count, i;
2222 struct address_space *mapping;
2223 struct inode *inode;
2224 struct page *page;
2225 int ret;
2226
856fa198 2227 if (obj_priv->pages_refcount++ != 0)
673a394b
EA
2228 return 0;
2229
2230 /* Get the list of pages out of our struct file. They'll be pinned
2231 * at this point until we release them.
2232 */
2233 page_count = obj->size / PAGE_SIZE;
856fa198 2234 BUG_ON(obj_priv->pages != NULL);
8e7d2b2c 2235 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
856fa198 2236 if (obj_priv->pages == NULL) {
856fa198 2237 obj_priv->pages_refcount--;
673a394b
EA
2238 return -ENOMEM;
2239 }
2240
2241 inode = obj->filp->f_path.dentry->d_inode;
2242 mapping = inode->i_mapping;
2243 for (i = 0; i < page_count; i++) {
4bdadb97
CW
2244 page = read_cache_page_gfp(mapping, i,
2245 mapping_gfp_mask(mapping) |
2246 __GFP_COLD |
2247 gfpmask);
673a394b
EA
2248 if (IS_ERR(page)) {
2249 ret = PTR_ERR(page);
856fa198 2250 i915_gem_object_put_pages(obj);
673a394b
EA
2251 return ret;
2252 }
856fa198 2253 obj_priv->pages[i] = page;
673a394b 2254 }
280b713b
EA
2255
2256 if (obj_priv->tiling_mode != I915_TILING_NONE)
2257 i915_gem_object_do_bit_17_swizzle(obj);
2258
673a394b
EA
2259 return 0;
2260}
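/* Reference-count note for the helper above: only the first caller
 * actually populates obj_priv->pages from the shmem backing store;
 * nested callers just bump pages_refcount and reuse the array. Every
 * successful get must therefore be balanced by a matching
 * i915_gem_object_put_pages() before the pages can be released.
 */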
2261
de151cf6
JB
2262static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2263{
2264 struct drm_gem_object *obj = reg->obj;
2265 struct drm_device *dev = obj->dev;
2266 drm_i915_private_t *dev_priv = dev->dev_private;
2267 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2268 int regnum = obj_priv->fence_reg;
2269 uint64_t val;
2270
2271 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2272 0xfffff000) << 32;
2273 val |= obj_priv->gtt_offset & 0xfffff000;
2274 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2275 if (obj_priv->tiling_mode == I915_TILING_Y)
2276 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2277 val |= I965_FENCE_REG_VALID;
2278
2279 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2280}
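/* Illustrative 965 fence layout, with hypothetical values: an X-tiled
 * object at gtt_offset 0x00100000, size 1MB, stride 4096. The upper
 * dword holds the last page, (0x00100000 + 0x00100000 - 4096) &
 * 0xfffff000 = 0x001ff000; the lower dword holds the start address
 * 0x00100000; the pitch field encodes 4096 / 128 - 1 = 31; and
 * I965_FENCE_REG_VALID arms the register.
 */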
2281
2282static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2283{
2284 struct drm_gem_object *obj = reg->obj;
2285 struct drm_device *dev = obj->dev;
2286 drm_i915_private_t *dev_priv = dev->dev_private;
2287 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2288 int regnum = obj_priv->fence_reg;
0f973f27 2289 int tile_width;
dc529a4f 2290 uint32_t fence_reg, val;
de151cf6
JB
2291 uint32_t pitch_val;
2292
2293 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2294 (obj_priv->gtt_offset & (obj->size - 1))) {
f06da264 2295 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
0f973f27 2296 __func__, obj_priv->gtt_offset, obj->size);
de151cf6
JB
2297 return;
2298 }
2299
0f973f27
JB
2300 if (obj_priv->tiling_mode == I915_TILING_Y &&
2301 HAS_128_BYTE_Y_TILING(dev))
2302 tile_width = 128;
de151cf6 2303 else
0f973f27
JB
2304 tile_width = 512;
2305
2306 /* Note: the pitch must be a power-of-two number of tile widths */
2307 pitch_val = obj_priv->stride / tile_width;
2308 pitch_val = ffs(pitch_val) - 1;
de151cf6
JB
2309
2310 val = obj_priv->gtt_offset;
2311 if (obj_priv->tiling_mode == I915_TILING_Y)
2312 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2313 val |= I915_FENCE_SIZE_BITS(obj->size);
2314 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2315 val |= I830_FENCE_REG_VALID;
2316
dc529a4f
EA
2317 if (regnum < 8)
2318 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2319 else
2320 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2321 I915_WRITE(fence_reg, val);
de151cf6
JB
2322}
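/* Pitch encoding example for the i915 path (hypothetical values): an
 * X-tiled surface with a 2048-byte stride and 512-byte tiles gives
 * pitch_val = 2048 / 512 = 4, then ffs(4) - 1 = 2, i.e. the register
 * stores log2 of the pitch in tile widths. That is why the pitch must
 * be a power-of-two number of tile widths for this encoding to work.
 */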
2323
2324static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2325{
2326 struct drm_gem_object *obj = reg->obj;
2327 struct drm_device *dev = obj->dev;
2328 drm_i915_private_t *dev_priv = dev->dev_private;
2329 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2330 int regnum = obj_priv->fence_reg;
2331 uint32_t val;
2332 uint32_t pitch_val;
8d7773a3 2333 uint32_t fence_size_bits;
de151cf6 2334
8d7773a3 2335 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
de151cf6 2336 (obj_priv->gtt_offset & (obj->size - 1))) {
8d7773a3 2337 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
0f973f27 2338 __func__, obj_priv->gtt_offset);
de151cf6
JB
2339 return;
2340 }
2341
e76a16de
EA
2342 pitch_val = obj_priv->stride / 128;
2343 pitch_val = ffs(pitch_val) - 1;
2344 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2345
de151cf6
JB
2346 val = obj_priv->gtt_offset;
2347 if (obj_priv->tiling_mode == I915_TILING_Y)
2348 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
8d7773a3
DV
2349 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2350 WARN_ON(fence_size_bits & ~0x00000f00);
2351 val |= fence_size_bits;
de151cf6
JB
2352 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2353 val |= I830_FENCE_REG_VALID;
2354
2355 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
de151cf6
JB
2356}
2357
2358/**
2359 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2360 * @obj: object to map through a fence reg
2361 *
2362 * When mapping objects through the GTT, userspace wants to be able to write
2363 * to them without having to worry about swizzling if the object is tiled.
2364 *
2365 * This function walks the fence regs looking for a free one for @obj,
2366 * stealing one if it can't find any.
2367 *
2368 * It then sets up the reg based on the object's properties: address, pitch
2369 * and tiling format.
2370 */
8c4b8c3f
CW
2371int
2372i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
de151cf6
JB
2373{
2374 struct drm_device *dev = obj->dev;
79e53945 2375 struct drm_i915_private *dev_priv = dev->dev_private;
de151cf6
JB
2376 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2377 struct drm_i915_fence_reg *reg = NULL;
fc7170ba
CW
2378 struct drm_i915_gem_object *old_obj_priv = NULL;
2379 int i, ret, avail;
de151cf6 2380
a09ba7fa
EA
2381 /* Just update our place in the LRU if our fence is getting used. */
2382 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
2383 list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2384 return 0;
2385 }
2386
de151cf6
JB
2387 switch (obj_priv->tiling_mode) {
2388 case I915_TILING_NONE:
2389 WARN(1, "allocating a fence for non-tiled object?\n");
2390 break;
2391 case I915_TILING_X:
0f973f27
JB
2392 if (!obj_priv->stride)
2393 return -EINVAL;
2394 WARN((obj_priv->stride & (512 - 1)),
2395 "object 0x%08x is X tiled but has non-512B pitch\n",
2396 obj_priv->gtt_offset);
de151cf6
JB
2397 break;
2398 case I915_TILING_Y:
0f973f27
JB
2399 if (!obj_priv->stride)
2400 return -EINVAL;
2401 WARN((obj_priv->stride & (128 - 1)),
2402 "object 0x%08x is Y tiled but has non-128B pitch\n",
2403 obj_priv->gtt_offset);
de151cf6
JB
2404 break;
2405 }
2406
2407 /* First try to find a free reg */
fc7170ba 2408 avail = 0;
de151cf6
JB
2409 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2410 reg = &dev_priv->fence_regs[i];
2411 if (!reg->obj)
2412 break;
fc7170ba
CW
2413
2414 old_obj_priv = reg->obj->driver_private;
2415 if (!old_obj_priv->pin_count)
2416 avail++;
de151cf6
JB
2417 }
2418
2419 /* None available, try to steal one or wait for a user to finish */
2420 if (i == dev_priv->num_fence_regs) {
a09ba7fa 2421 struct drm_gem_object *old_obj = NULL;
de151cf6 2422
fc7170ba 2423 if (avail == 0)
2939e1f5 2424 return -ENOSPC;
fc7170ba 2425
a09ba7fa
EA
2426 list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
2427 fence_list) {
2428 old_obj = old_obj_priv->obj;
d7619c4b 2429
d7619c4b
CW
2430 if (old_obj_priv->pin_count)
2431 continue;
2432
a09ba7fa
EA
2433 /* Take a reference, as otherwise the wait_rendering
2434 * below may cause the object to get freed out from
2435 * under us.
2436 */
2437 drm_gem_object_reference(old_obj);
2438
d7619c4b
CW
2439 /* i915 uses fences for GPU access to tiled buffers */
2440 if (IS_I965G(dev) || !old_obj_priv->active)
de151cf6 2441 break;
d7619c4b 2442
a09ba7fa
EA
2443 /* This brings the object to the head of the LRU if it
2444 * had been written to. The only way this should
2445 * result in us waiting longer than the expected
2446 * optimal amount of time is if there was a
2447 * fence-using buffer later that was read-only.
2448 */
2449 i915_gem_object_flush_gpu_write_domain(old_obj);
2450 ret = i915_gem_object_wait_rendering(old_obj);
58c2fb64
CW
2451 if (ret != 0) {
2452 drm_gem_object_unreference(old_obj);
d7619c4b 2453 return ret;
58c2fb64
CW
2454 }
2455
a09ba7fa 2456 break;
de151cf6
JB
2457 }
2458
2459 /*
2460 * Zap this virtual mapping so we can set up a fence again
2461 * for this object next time we need it.
2462 */
58c2fb64
CW
2463 i915_gem_release_mmap(old_obj);
2464
a09ba7fa 2465 i = old_obj_priv->fence_reg;
58c2fb64
CW
2466 reg = &dev_priv->fence_regs[i];
2467
de151cf6 2468 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
a09ba7fa 2469 list_del_init(&old_obj_priv->fence_list);
58c2fb64 2470
a09ba7fa 2471 drm_gem_object_unreference(old_obj);
de151cf6
JB
2472 }
2473
2474 obj_priv->fence_reg = i;
a09ba7fa
EA
2475 list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2476
de151cf6
JB
2477 reg->obj = obj;
2478
2479 if (IS_I965G(dev))
2480 i965_write_fence_reg(reg);
2481 else if (IS_I9XX(dev))
2482 i915_write_fence_reg(reg);
2483 else
2484 i830_write_fence_reg(reg);
d9ddcb96 2485
1c5d22f7
CW
2486 trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
2487
d9ddcb96 2488 return 0;
de151cf6
JB
2489}
2490
2491/**
2492 * i915_gem_clear_fence_reg - clear out fence register info
2493 * @obj: object to clear
2494 *
2495 * Zeroes out the fence register itself and clears out the associated
2496 * data structures in dev_priv and obj_priv.
2497 */
2498static void
2499i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2500{
2501 struct drm_device *dev = obj->dev;
79e53945 2502 drm_i915_private_t *dev_priv = dev->dev_private;
de151cf6
JB
2503 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2504
2505 if (IS_I965G(dev))
2506 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
dc529a4f
EA
2507 else {
2508 uint32_t fence_reg;
2509
2510 if (obj_priv->fence_reg < 8)
2511 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2512 else
2513 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
2514 8) * 4;
2515
2516 I915_WRITE(fence_reg, 0);
2517 }
de151cf6
JB
2518
2519 dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
2520 obj_priv->fence_reg = I915_FENCE_REG_NONE;
a09ba7fa 2521 list_del_init(&obj_priv->fence_list);
de151cf6
JB
2522}
2523
52dc7d32
CW
2524/**
2525 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2526 * to the buffer to finish, and then resets the fence register.
2527 * @obj: tiled object holding a fence register.
2528 *
2529 * Zeroes out the fence register itself and clears out the associated
2530 * data structures in dev_priv and obj_priv.
2531 */
2532int
2533i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2534{
2535 struct drm_device *dev = obj->dev;
2536 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2537
2538 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2539 return 0;
2540
2541 /* On the i915, GPU access to tiled buffers is via a fence,
2542 * therefore we must wait for any outstanding access to complete
2543 * before clearing the fence.
2544 */
2545 if (!IS_I965G(dev)) {
2546 int ret;
2547
2548 i915_gem_object_flush_gpu_write_domain(obj);
2549 i915_gem_object_flush_gtt_write_domain(obj);
2550 ret = i915_gem_object_wait_rendering(obj);
2551 if (ret != 0)
2552 return ret;
2553 }
2554
2555 i915_gem_clear_fence_reg(obj);
2556
2557 return 0;
2558}
2559
673a394b
EA
2560/**
2561 * Finds free space in the GTT aperture and binds the object there.
2562 */
2563static int
2564i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2565{
2566 struct drm_device *dev = obj->dev;
2567 drm_i915_private_t *dev_priv = dev->dev_private;
2568 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2569 struct drm_mm_node *free_space;
4bdadb97 2570 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
07f73f69 2571 int ret;
673a394b 2572
bb6baf76 2573 if (obj_priv->madv != I915_MADV_WILLNEED) {
3ef94daa
CW
2574 DRM_ERROR("Attempting to bind a purgeable object\n");
2575 return -EINVAL;
2576 }
2577
673a394b 2578 if (alignment == 0)
0f973f27 2579 alignment = i915_gem_get_gtt_alignment(obj);
8d7773a3 2580 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
673a394b
EA
2581 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2582 return -EINVAL;
2583 }
2584
2585 search_free:
2586 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2587 obj->size, alignment, 0);
2588 if (free_space != NULL) {
2589 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2590 alignment);
2591 if (obj_priv->gtt_space != NULL) {
2592 obj_priv->gtt_space->private = obj;
2593 obj_priv->gtt_offset = obj_priv->gtt_space->start;
2594 }
2595 }
2596 if (obj_priv->gtt_space == NULL) {
2597 /* If the gtt is empty and we're still having trouble
2598 * fitting our object in, we're out of memory.
2599 */
2600#if WATCH_LRU
2601 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2602#endif
07f73f69 2603 ret = i915_gem_evict_something(dev, obj->size);
9731129c 2604 if (ret)
673a394b 2605 return ret;
9731129c 2606
673a394b
EA
2607 goto search_free;
2608 }
2609
2610#if WATCH_BUF
cfd43c02 2611 DRM_INFO("Binding object of size %zd at 0x%08x\n",
673a394b
EA
2612 obj->size, obj_priv->gtt_offset);
2613#endif
4bdadb97 2614 ret = i915_gem_object_get_pages(obj, gfpmask);
673a394b
EA
2615 if (ret) {
2616 drm_mm_put_block(obj_priv->gtt_space);
2617 obj_priv->gtt_space = NULL;
07f73f69
CW
2618
2619 if (ret == -ENOMEM) {
2620 /* first try to clear up some space from the GTT */
2621 ret = i915_gem_evict_something(dev, obj->size);
2622 if (ret) {
07f73f69 2623 /* now try to shrink everyone else */
4bdadb97
CW
2624 if (gfpmask) {
2625 gfpmask = 0;
2626 goto search_free;
07f73f69
CW
2627 }
2628
2629 return ret;
2630 }
2631
2632 goto search_free;
2633 }
2634
673a394b
EA
2635 return ret;
2636 }
2637
673a394b
EA
2638 /* Create an AGP memory structure pointing at our pages, and bind it
2639 * into the GTT.
2640 */
2641 obj_priv->agp_mem = drm_agp_bind_pages(dev,
856fa198 2642 obj_priv->pages,
07f73f69 2643 obj->size >> PAGE_SHIFT,
ba1eb1d8
KP
2644 obj_priv->gtt_offset,
2645 obj_priv->agp_type);
673a394b 2646 if (obj_priv->agp_mem == NULL) {
856fa198 2647 i915_gem_object_put_pages(obj);
673a394b
EA
2648 drm_mm_put_block(obj_priv->gtt_space);
2649 obj_priv->gtt_space = NULL;
07f73f69
CW
2650
2651 ret = i915_gem_evict_something(dev, obj->size);
9731129c 2652 if (ret)
07f73f69 2653 return ret;
07f73f69
CW
2654
2655 goto search_free;
673a394b
EA
2656 }
2657 atomic_inc(&dev->gtt_count);
2658 atomic_add(obj->size, &dev->gtt_memory);
2659
2660 /* Assert that the object is not currently in any GPU domain. As it
2661 * wasn't in the GTT, there shouldn't be any way it could have been in
2662 * a GPU cache
2663 */
21d509e3
CW
2664 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2665 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
673a394b 2666
1c5d22f7
CW
2667 trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
2668
673a394b
EA
2669 return 0;
2670}
2671
2672void
2673i915_gem_clflush_object(struct drm_gem_object *obj)
2674{
2675 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2676
2677 /* If we don't have a page list set up, then we're not pinned
2678 * to GPU, and we can ignore the cache flush because it'll happen
2679 * again at bind time.
2680 */
856fa198 2681 if (obj_priv->pages == NULL)
673a394b
EA
2682 return;
2683
1c5d22f7 2684 trace_i915_gem_object_clflush(obj);
cfa16a0d 2685
856fa198 2686 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
673a394b
EA
2687}
2688
e47c68e9
EA
2689/** Flushes any GPU write domain for the object if it's dirty. */
2690static void
2691i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2692{
2693 struct drm_device *dev = obj->dev;
2694 uint32_t seqno;
1c5d22f7 2695 uint32_t old_write_domain;
e47c68e9
EA
2696
2697 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2698 return;
2699
2700 /* Queue the GPU write cache flushing we need. */
1c5d22f7 2701 old_write_domain = obj->write_domain;
e47c68e9 2702 i915_gem_flush(dev, 0, obj->write_domain);
b962442e 2703 seqno = i915_add_request(dev, NULL, obj->write_domain);
e47c68e9
EA
2704 obj->write_domain = 0;
2705 i915_gem_object_move_to_active(obj, seqno);
1c5d22f7
CW
2706
2707 trace_i915_gem_object_change_domain(obj,
2708 obj->read_domains,
2709 old_write_domain);
e47c68e9
EA
2710}
2711
2712/** Flushes the GTT write domain for the object if it's dirty. */
2713static void
2714i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2715{
1c5d22f7
CW
2716 uint32_t old_write_domain;
2717
e47c68e9
EA
2718 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2719 return;
2720
2721 /* No actual flushing is required for the GTT write domain. Writes
2722 * to it immediately go to main memory as far as we know, so there's
2723 * no chipset flush. It also doesn't land in render cache.
2724 */
1c5d22f7 2725 old_write_domain = obj->write_domain;
e47c68e9 2726 obj->write_domain = 0;
1c5d22f7
CW
2727
2728 trace_i915_gem_object_change_domain(obj,
2729 obj->read_domains,
2730 old_write_domain);
e47c68e9
EA
2731}
2732
2733/** Flushes the CPU write domain for the object if it's dirty. */
2734static void
2735i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2736{
2737 struct drm_device *dev = obj->dev;
1c5d22f7 2738 uint32_t old_write_domain;
e47c68e9
EA
2739
2740 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2741 return;
2742
2743 i915_gem_clflush_object(obj);
2744 drm_agp_chipset_flush(dev);
1c5d22f7 2745 old_write_domain = obj->write_domain;
e47c68e9 2746 obj->write_domain = 0;
1c5d22f7
CW
2747
2748 trace_i915_gem_object_change_domain(obj,
2749 obj->read_domains,
2750 old_write_domain);
e47c68e9
EA
2751}
2752
6b95a207
KH
2753void
2754i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
2755{
2756 switch (obj->write_domain) {
2757 case I915_GEM_DOMAIN_GTT:
2758 i915_gem_object_flush_gtt_write_domain(obj);
2759 break;
2760 case I915_GEM_DOMAIN_CPU:
2761 i915_gem_object_flush_cpu_write_domain(obj);
2762 break;
2763 default:
2764 i915_gem_object_flush_gpu_write_domain(obj);
2765 break;
2766 }
2767}
2768
2ef7eeaa
EA
2769/**
2770 * Moves a single object to the GTT read, and possibly write domain.
2771 *
2772 * This function returns when the move is complete, including waiting on
2773 * flushes to occur.
2774 */
79e53945 2775int
2ef7eeaa
EA
2776i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2777{
2ef7eeaa 2778 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1c5d22f7 2779 uint32_t old_write_domain, old_read_domains;
e47c68e9 2780 int ret;
2ef7eeaa 2781
02354392
EA
2782 /* Not valid to be called on unbound objects. */
2783 if (obj_priv->gtt_space == NULL)
2784 return -EINVAL;
2785
e47c68e9
EA
2786 i915_gem_object_flush_gpu_write_domain(obj);
2787 /* Wait on any GPU rendering and flushing to occur. */
2788 ret = i915_gem_object_wait_rendering(obj);
2789 if (ret != 0)
2790 return ret;
2791
1c5d22f7
CW
2792 old_write_domain = obj->write_domain;
2793 old_read_domains = obj->read_domains;
2794
e47c68e9
EA
2795 /* If we're writing through the GTT domain, then CPU and GPU caches
2796 * will need to be invalidated at next use.
2ef7eeaa 2797 */
e47c68e9
EA
2798 if (write)
2799 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2ef7eeaa 2800
e47c68e9 2801 i915_gem_object_flush_cpu_write_domain(obj);
2ef7eeaa 2802
e47c68e9
EA
2803 /* It should now be out of any other write domains, and we can update
2804 * the domain values for our changes.
2805 */
2806 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2807 obj->read_domains |= I915_GEM_DOMAIN_GTT;
2808 if (write) {
2809 obj->write_domain = I915_GEM_DOMAIN_GTT;
2810 obj_priv->dirty = 1;
2ef7eeaa
EA
2811 }
2812
1c5d22f7
CW
2813 trace_i915_gem_object_change_domain(obj,
2814 old_read_domains,
2815 old_write_domain);
2816
e47c68e9
EA
2817 return 0;
2818}
2819
b9241ea3
ZW
2820/*
2821 * Prepare buffer for display plane. Use an uninterruptible wait for any
2822 * required flush, as the modesetting process is not supposed to be interrupted.
2823 */
2824int
2825i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2826{
2827 struct drm_device *dev = obj->dev;
2828 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2829 uint32_t old_write_domain, old_read_domains;
2830 int ret;
2831
2832 /* Not valid to be called on unbound objects. */
2833 if (obj_priv->gtt_space == NULL)
2834 return -EINVAL;
2835
2836 i915_gem_object_flush_gpu_write_domain(obj);
2837
2838 /* Wait on any GPU rendering and flushing to occur. */
2839 if (obj_priv->active) {
2840#if WATCH_BUF
2841 DRM_INFO("%s: object %p wait for seqno %08x\n",
2842 __func__, obj, obj_priv->last_rendering_seqno);
2843#endif
2844 ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
2845 if (ret != 0)
2846 return ret;
2847 }
2848
2849 old_write_domain = obj->write_domain;
2850 old_read_domains = obj->read_domains;
2851
2852 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2853
2854 i915_gem_object_flush_cpu_write_domain(obj);
2855
2856 /* It should now be out of any other write domains, and we can update
2857 * the domain values for our changes.
2858 */
2859 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2860 obj->read_domains |= I915_GEM_DOMAIN_GTT;
2861 obj->write_domain = I915_GEM_DOMAIN_GTT;
2862 obj_priv->dirty = 1;
2863
2864 trace_i915_gem_object_change_domain(obj,
2865 old_read_domains,
2866 old_write_domain);
2867
2868 return 0;
2869}
2870
e47c68e9
EA
2871/**
2872 * Moves a single object to the CPU read, and possibly write domain.
2873 *
2874 * This function returns when the move is complete, including waiting on
2875 * flushes to occur.
2876 */
2877static int
2878i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2879{
1c5d22f7 2880 uint32_t old_write_domain, old_read_domains;
e47c68e9
EA
2881 int ret;
2882
2883 i915_gem_object_flush_gpu_write_domain(obj);
2ef7eeaa 2884 /* Wait on any GPU rendering and flushing to occur. */
e47c68e9
EA
2885 ret = i915_gem_object_wait_rendering(obj);
2886 if (ret != 0)
2887 return ret;
2ef7eeaa 2888
e47c68e9 2889 i915_gem_object_flush_gtt_write_domain(obj);
2ef7eeaa 2890
e47c68e9
EA
2891 /* If we have a partially-valid cache of the object in the CPU,
2892 * finish invalidating it and free the per-page flags.
2ef7eeaa 2893 */
e47c68e9 2894 i915_gem_object_set_to_full_cpu_read_domain(obj);
2ef7eeaa 2895
1c5d22f7
CW
2896 old_write_domain = obj->write_domain;
2897 old_read_domains = obj->read_domains;
2898
e47c68e9
EA
2899 /* Flush the CPU cache if it's still invalid. */
2900 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2ef7eeaa 2901 i915_gem_clflush_object(obj);
2ef7eeaa 2902
e47c68e9 2903 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2ef7eeaa
EA
2904 }
2905
2906 /* It should now be out of any other write domains, and we can update
2907 * the domain values for our changes.
2908 */
e47c68e9
EA
2909 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2910
2911 /* If we're writing through the CPU, then the GPU read domains will
2912 * need to be invalidated at next use.
2913 */
2914 if (write) {
2915 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2916 obj->write_domain = I915_GEM_DOMAIN_CPU;
2917 }
2ef7eeaa 2918
1c5d22f7
CW
2919 trace_i915_gem_object_change_domain(obj,
2920 old_read_domains,
2921 old_write_domain);
2922
2ef7eeaa
EA
2923 return 0;
2924}
2925
673a394b
EA
2926/*
2927 * Set the next domain for the specified object. This
2928 * may not actually perform the necessary flushing/invaliding though,
2929 * as that may want to be batched with other set_domain operations
2930 *
2931 * This is (we hope) the only really tricky part of gem. The goal
2932 * is fairly simple -- track which caches hold bits of the object
2933 * and make sure they remain coherent. A few concrete examples may
2934 * help to explain how it works. For shorthand, we use the notation
2935 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2936 * a pair of read and write domain masks.
2937 *
2938 * Case 1: the batch buffer
2939 *
2940 * 1. Allocated
2941 * 2. Written by CPU
2942 * 3. Mapped to GTT
2943 * 4. Read by GPU
2944 * 5. Unmapped from GTT
2945 * 6. Freed
2946 *
2947 * Let's take these a step at a time
2948 *
2949 * 1. Allocated
2950 * Pages allocated from the kernel may still have
2951 * cache contents, so we set them to (CPU, CPU) always.
2952 * 2. Written by CPU (using pwrite)
2953 * The pwrite function calls set_domain (CPU, CPU) and
2954 * this function does nothing (as nothing changes)
2955 * 3. Mapped to GTT
2956 * This function asserts that the object is not
2957 * currently in any GPU-based read or write domains
2958 * 4. Read by GPU
2959 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2960 * As write_domain is zero, this function adds in the
2961 * current read domains (CPU+COMMAND, 0).
2962 * flush_domains is set to CPU.
2963 * invalidate_domains is set to COMMAND
2964 * clflush is run to get data out of the CPU caches
2965 * then i915_dev_set_domain calls i915_gem_flush to
2966 * emit an MI_FLUSH and drm_agp_chipset_flush
2967 * 5. Unmapped from GTT
2968 * i915_gem_object_unbind calls set_domain (CPU, CPU)
2969 * flush_domains and invalidate_domains end up both zero
2970 * so no flushing/invalidating happens
2971 * 6. Freed
2972 * yay, done
2973 *
2974 * Case 2: The shared render buffer
2975 *
2976 * 1. Allocated
2977 * 2. Mapped to GTT
2978 * 3. Read/written by GPU
2979 * 4. set_domain to (CPU,CPU)
2980 * 5. Read/written by CPU
2981 * 6. Read/written by GPU
2982 *
2983 * 1. Allocated
2984 * Same as last example, (CPU, CPU)
2985 * 2. Mapped to GTT
2986 * Nothing changes (assertions find that it is not in the GPU)
2987 * 3. Read/written by GPU
2988 * execbuffer calls set_domain (RENDER, RENDER)
2989 * flush_domains gets CPU
2990 * invalidate_domains gets GPU
2991 * clflush (obj)
2992 * MI_FLUSH and drm_agp_chipset_flush
2993 * 4. set_domain (CPU, CPU)
2994 * flush_domains gets GPU
2995 * invalidate_domains gets CPU
2996 * wait_rendering (obj) to make sure all drawing is complete.
2997 * This will include an MI_FLUSH to get the data from GPU
2998 * to memory
2999 * clflush (obj) to invalidate the CPU cache
3000 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
3001 * 5. Read/written by CPU
3002 * cache lines are loaded and dirtied
3003 * 6. Read/written by GPU
3004 * Same as last GPU access
3005 *
3006 * Case 3: The constant buffer
3007 *
3008 * 1. Allocated
3009 * 2. Written by CPU
3010 * 3. Read by GPU
3011 * 4. Updated (written) by CPU again
3012 * 5. Read by GPU
3013 *
3014 * 1. Allocated
3015 * (CPU, CPU)
3016 * 2. Written by CPU
3017 * (CPU, CPU)
3018 * 3. Read by GPU
3019 * (CPU+RENDER, 0)
3020 * flush_domains = CPU
3021 * invalidate_domains = RENDER
3022 * clflush (obj)
3023 * MI_FLUSH
3024 * drm_agp_chipset_flush
3025 * 4. Updated (written) by CPU again
3026 * (CPU, CPU)
3027 * flush_domains = 0 (no previous write domain)
3028 * invalidate_domains = 0 (no new read domains)
3029 * 5. Read by GPU
3030 * (CPU+RENDER, 0)
3031 * flush_domains = CPU
3032 * invalidate_domains = RENDER
3033 * clflush (obj)
3034 * MI_FLUSH
3035 * drm_agp_chipset_flush
3036 */
c0d90829 3037static void
8b0e378a 3038i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
673a394b
EA
3039{
3040 struct drm_device *dev = obj->dev;
3041 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3042 uint32_t invalidate_domains = 0;
3043 uint32_t flush_domains = 0;
1c5d22f7 3044 uint32_t old_read_domains;
e47c68e9 3045
8b0e378a
EA
3046 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
3047 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
673a394b 3048
652c393a
JB
3049 intel_mark_busy(dev, obj);
3050
673a394b
EA
3051#if WATCH_BUF
3052 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
3053 __func__, obj,
8b0e378a
EA
3054 obj->read_domains, obj->pending_read_domains,
3055 obj->write_domain, obj->pending_write_domain);
673a394b
EA
3056#endif
3057 /*
3058 * If the object isn't moving to a new write domain,
3059 * let the object stay in multiple read domains
3060 */
8b0e378a
EA
3061 if (obj->pending_write_domain == 0)
3062 obj->pending_read_domains |= obj->read_domains;
673a394b
EA
3063 else
3064 obj_priv->dirty = 1;
3065
3066 /*
3067 * Flush the current write domain if
3068 * the new read domains don't match. Invalidate
3069 * any read domains which differ from the old
3070 * write domain
3071 */
8b0e378a
EA
3072 if (obj->write_domain &&
3073 obj->write_domain != obj->pending_read_domains) {
673a394b 3074 flush_domains |= obj->write_domain;
8b0e378a
EA
3075 invalidate_domains |=
3076 obj->pending_read_domains & ~obj->write_domain;
673a394b
EA
3077 }
3078 /*
3079 * Invalidate any read caches which may have
3080 * stale data. That is, any new read domains.
3081 */
8b0e378a 3082 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
673a394b
EA
3083 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
3084#if WATCH_BUF
3085 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
3086 __func__, flush_domains, invalidate_domains);
3087#endif
673a394b
EA
3088 i915_gem_clflush_object(obj);
3089 }
3090
1c5d22f7
CW
3091 old_read_domains = obj->read_domains;
3092
efbeed96
EA
3093 /* The actual obj->write_domain will be updated with
3094 * pending_write_domain after we emit the accumulated flush for all
3095 * of our domain changes in execbuffers (which clears objects'
3096 * write_domains). So if we have a current write domain that we
3097 * aren't changing, set pending_write_domain to that.
3098 */
3099 if (flush_domains == 0 && obj->pending_write_domain == 0)
3100 obj->pending_write_domain = obj->write_domain;
8b0e378a 3101 obj->read_domains = obj->pending_read_domains;
673a394b
EA
3102
3103 dev->invalidate_domains |= invalidate_domains;
3104 dev->flush_domains |= flush_domains;
3105#if WATCH_BUF
3106 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
3107 __func__,
3108 obj->read_domains, obj->write_domain,
3109 dev->invalidate_domains, dev->flush_domains);
3110#endif
1c5d22f7
CW
3111
3112 trace_i915_gem_object_change_domain(obj,
3113 old_read_domains,
3114 obj->write_domain);
673a394b
EA
3115}
3116
3117/**
e47c68e9 3118 * Moves the object from a partially CPU read to a full one.
673a394b 3119 *
e47c68e9
EA
3120 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3121 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
673a394b 3122 */
e47c68e9
EA
3123static void
3124i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
673a394b
EA
3125{
3126 struct drm_i915_gem_object *obj_priv = obj->driver_private;
673a394b 3127
e47c68e9
EA
3128 if (!obj_priv->page_cpu_valid)
3129 return;
3130
3131 /* If we're partially in the CPU read domain, finish moving it in.
3132 */
3133 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3134 int i;
3135
3136 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3137 if (obj_priv->page_cpu_valid[i])
3138 continue;
856fa198 3139 drm_clflush_pages(obj_priv->pages + i, 1);
e47c68e9 3140 }
e47c68e9
EA
3141 }
3142
3143 /* Free the page_cpu_valid mappings which are now stale, whether
3144 * or not we've got I915_GEM_DOMAIN_CPU.
3145 */
9a298b2a 3146 kfree(obj_priv->page_cpu_valid);
e47c68e9
EA
3147 obj_priv->page_cpu_valid = NULL;
3148}
3149
3150/**
3151 * Set the CPU read domain on a range of the object.
3152 *
3153 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3154 * not entirely valid. The page_cpu_valid member of the object flags which
3155 * pages have been flushed, and will be respected by
3156 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3157 * of the whole object.
3158 *
3159 * This function returns when the move is complete, including waiting on
3160 * flushes to occur.
3161 */
3162static int
3163i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3164 uint64_t offset, uint64_t size)
3165{
3166 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1c5d22f7 3167 uint32_t old_read_domains;
e47c68e9 3168 int i, ret;
673a394b 3169
e47c68e9
EA
3170 if (offset == 0 && size == obj->size)
3171 return i915_gem_object_set_to_cpu_domain(obj, 0);
673a394b 3172
e47c68e9
EA
3173 i915_gem_object_flush_gpu_write_domain(obj);
3174 /* Wait on any GPU rendering and flushing to occur. */
6a47baa6 3175 ret = i915_gem_object_wait_rendering(obj);
e47c68e9 3176 if (ret != 0)
6a47baa6 3177 return ret;
e47c68e9
EA
3178 i915_gem_object_flush_gtt_write_domain(obj);
3179
3180 /* If we're already fully in the CPU read domain, we're done. */
3181 if (obj_priv->page_cpu_valid == NULL &&
3182 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
3183 return 0;
673a394b 3184
e47c68e9
EA
3185 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3186 * newly adding I915_GEM_DOMAIN_CPU
3187 */
673a394b 3188 if (obj_priv->page_cpu_valid == NULL) {
9a298b2a
EA
3189 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3190 GFP_KERNEL);
e47c68e9
EA
3191 if (obj_priv->page_cpu_valid == NULL)
3192 return -ENOMEM;
3193 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3194 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
673a394b
EA
3195
3196 /* Flush the cache on any pages that are still invalid from the CPU's
3197 * perspective.
3198 */
e47c68e9
EA
3199 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3200 i++) {
673a394b
EA
3201 if (obj_priv->page_cpu_valid[i])
3202 continue;
3203
856fa198 3204 drm_clflush_pages(obj_priv->pages + i, 1);
673a394b
EA
3205
3206 obj_priv->page_cpu_valid[i] = 1;
3207 }
3208
e47c68e9
EA
3209 /* It should now be out of any other write domains, and we can update
3210 * the domain values for our changes.
3211 */
3212 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3213
1c5d22f7 3214 old_read_domains = obj->read_domains;
e47c68e9
EA
3215 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3216
1c5d22f7
CW
3217 trace_i915_gem_object_change_domain(obj,
3218 old_read_domains,
3219 obj->write_domain);
3220
673a394b
EA
3221 return 0;
3222}
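/* Range example for the clflush loop above (hypothetical values, 4K
 * pages): offset = 0x1800 and size = 0x2000 cover bytes 0x1800-0x37ff,
 * so i runs from 0x1800 / PAGE_SIZE = 1 through (0x1800 + 0x2000 - 1) /
 * PAGE_SIZE = 3, flushing pages 1, 2 and 3 and marking each of them in
 * page_cpu_valid.
 */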
3223
673a394b
EA
3224/**
3225 * Pin an object to the GTT and evaluate the relocations landing in it.
3226 */
3227static int
3228i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3229 struct drm_file *file_priv,
76446cac 3230 struct drm_i915_gem_exec_object2 *entry,
40a5f0de 3231 struct drm_i915_gem_relocation_entry *relocs)
673a394b
EA
3232{
3233 struct drm_device *dev = obj->dev;
0839ccb8 3234 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b
EA
3235 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3236 int i, ret;
0839ccb8 3237 void __iomem *reloc_page;
76446cac
JB
3238 bool need_fence;
3239
3240 need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3241 obj_priv->tiling_mode != I915_TILING_NONE;
3242
3243 /* Check fence reg constraints and rebind if necessary */
3244 if (need_fence && !i915_obj_fenceable(dev, obj))
3245 i915_gem_object_unbind(obj);
673a394b
EA
3246
3247 /* Choose the GTT offset for our buffer and put it there. */
3248 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3249 if (ret)
3250 return ret;
3251
76446cac
JB
3252 /*
3253 * Pre-965 chips need a fence register set up in order to
3254 * properly handle blits to/from tiled surfaces.
3255 */
3256 if (need_fence) {
3257 ret = i915_gem_object_get_fence_reg(obj);
3258 if (ret != 0) {
3259 if (ret != -EBUSY && ret != -ERESTARTSYS)
3260 DRM_ERROR("Failure to install fence: %d\n",
3261 ret);
3262 i915_gem_object_unpin(obj);
3263 return ret;
3264 }
3265 }
3266
673a394b
EA
3267 entry->offset = obj_priv->gtt_offset;
3268
673a394b
EA
3269 /* Apply the relocations, using the GTT aperture to avoid cache
3270 * flushing requirements.
3271 */
3272 for (i = 0; i < entry->relocation_count; i++) {
40a5f0de 3273 struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
673a394b
EA
3274 struct drm_gem_object *target_obj;
3275 struct drm_i915_gem_object *target_obj_priv;
3043c60c
EA
3276 uint32_t reloc_val, reloc_offset;
3277 uint32_t __iomem *reloc_entry;
673a394b 3278
673a394b 3279 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
40a5f0de 3280 reloc->target_handle);
673a394b
EA
3281 if (target_obj == NULL) {
3282 i915_gem_object_unpin(obj);
3283 return -EBADF;
3284 }
3285 target_obj_priv = target_obj->driver_private;
3286
8542a0bb
CW
3287#if WATCH_RELOC
3288 DRM_INFO("%s: obj %p offset %08x target %d "
3289 "read %08x write %08x gtt %08x "
3290 "presumed %08x delta %08x\n",
3291 __func__,
3292 obj,
3293 (int) reloc->offset,
3294 (int) reloc->target_handle,
3295 (int) reloc->read_domains,
3296 (int) reloc->write_domain,
3297 (int) target_obj_priv->gtt_offset,
3298 (int) reloc->presumed_offset,
3299 reloc->delta);
3300#endif
3301
673a394b
EA
3302 /* The target buffer should have appeared before us in the
3303 * exec_object list, so it should have a GTT space bound by now.
3304 */
3305 if (target_obj_priv->gtt_space == NULL) {
3306 DRM_ERROR("No GTT space found for object %d\n",
40a5f0de 3307 reloc->target_handle);
673a394b
EA
3308 drm_gem_object_unreference(target_obj);
3309 i915_gem_object_unpin(obj);
3310 return -EINVAL;
3311 }
3312
8542a0bb 3313 /* Validate that the target is in a valid r/w GPU domain */
40a5f0de
EA
3314 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3315 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
e47c68e9
EA
3316 DRM_ERROR("reloc with read/write CPU domains: "
3317 "obj %p target %d offset %d "
3318 "read %08x write %08x",
40a5f0de
EA
3319 obj, reloc->target_handle,
3320 (int) reloc->offset,
3321 reloc->read_domains,
3322 reloc->write_domain);
491152b8
CW
3323 drm_gem_object_unreference(target_obj);
3324 i915_gem_object_unpin(obj);
e47c68e9
EA
3325 return -EINVAL;
3326 }
40a5f0de
EA
3327 if (reloc->write_domain && target_obj->pending_write_domain &&
3328 reloc->write_domain != target_obj->pending_write_domain) {
673a394b
EA
3329 DRM_ERROR("Write domain conflict: "
3330 "obj %p target %d offset %d "
3331 "new %08x old %08x\n",
40a5f0de
EA
3332 obj, reloc->target_handle,
3333 (int) reloc->offset,
3334 reloc->write_domain,
673a394b
EA
3335 target_obj->pending_write_domain);
3336 drm_gem_object_unreference(target_obj);
3337 i915_gem_object_unpin(obj);
3338 return -EINVAL;
3339 }
3340
40a5f0de
EA
3341 target_obj->pending_read_domains |= reloc->read_domains;
3342 target_obj->pending_write_domain |= reloc->write_domain;
673a394b
EA
3343
3344 /* If the relocation already has the right value in it, no
3345 * more work needs to be done.
3346 */
40a5f0de 3347 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
673a394b
EA
3348 drm_gem_object_unreference(target_obj);
3349 continue;
3350 }
3351
8542a0bb
CW
3352 /* Check that the relocation address is valid... */
3353 if (reloc->offset > obj->size - 4) {
3354 DRM_ERROR("Relocation beyond object bounds: "
3355 "obj %p target %d offset %d size %d.\n",
3356 obj, reloc->target_handle,
3357 (int) reloc->offset, (int) obj->size);
3358 drm_gem_object_unreference(target_obj);
3359 i915_gem_object_unpin(obj);
3360 return -EINVAL;
3361 }
3362 if (reloc->offset & 3) {
3363 DRM_ERROR("Relocation not 4-byte aligned: "
3364 "obj %p target %d offset %d.\n",
3365 obj, reloc->target_handle,
3366 (int) reloc->offset);
3367 drm_gem_object_unreference(target_obj);
3368 i915_gem_object_unpin(obj);
3369 return -EINVAL;
3370 }
3371
3372 /* and points to somewhere within the target object. */
3373 if (reloc->delta >= target_obj->size) {
3374 DRM_ERROR("Relocation beyond target object bounds: "
3375 "obj %p target %d delta %d size %d.\n",
3376 obj, reloc->target_handle,
3377 (int) reloc->delta, (int) target_obj->size);
3378 drm_gem_object_unreference(target_obj);
3379 i915_gem_object_unpin(obj);
3380 return -EINVAL;
3381 }
3382
2ef7eeaa
EA
3383 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3384 if (ret != 0) {
3385 drm_gem_object_unreference(target_obj);
3386 i915_gem_object_unpin(obj);
3387 return -EINVAL;
673a394b
EA
3388 }
3389
3390 /* Map the page containing the relocation we're going to
3391 * perform.
3392 */
40a5f0de 3393 reloc_offset = obj_priv->gtt_offset + reloc->offset;
0839ccb8
KP
3394 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3395 (reloc_offset &
3396 ~(PAGE_SIZE - 1)));
3043c60c 3397 reloc_entry = (uint32_t __iomem *)(reloc_page +
0839ccb8 3398 (reloc_offset & (PAGE_SIZE - 1)));
40a5f0de 3399 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
673a394b
EA
3400
3401#if WATCH_BUF
3402 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
40a5f0de 3403 obj, (unsigned int) reloc->offset,
673a394b
EA
3404 readl(reloc_entry), reloc_val);
3405#endif
3406 writel(reloc_val, reloc_entry);
0839ccb8 3407 io_mapping_unmap_atomic(reloc_page);
673a394b 3408
40a5f0de
EA
3409 /* The updated presumed offset for this entry will be
3410 * copied back out to the user.
673a394b 3411 */
40a5f0de 3412 reloc->presumed_offset = target_obj_priv->gtt_offset;
673a394b
EA
3413
3414 drm_gem_object_unreference(target_obj);
3415 }
3416
673a394b
EA
3417#if WATCH_BUF
3418 if (0)
3419 i915_gem_dump_object(obj, 128, __func__, ~0);
3420#endif
3421 return 0;
3422}
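/* Relocation arithmetic, worked through with hypothetical offsets: for
 * an object at gtt_offset 0x00200000 with reloc->offset 0x1004, the
 * code maps the GTT page at 0x00201000 and patches the dword at page
 * offset 0x004. If the target sits at 0x00300000 and reloc->delta is
 * 0x10, the value written is 0x00300010, and 0x00300000 is recorded as
 * the new presumed_offset for copying back to userspace.
 */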
3423
3424/** Dispatch a batchbuffer to the ring
3425 */
3426static int
3427i915_dispatch_gem_execbuffer(struct drm_device *dev,
76446cac 3428 struct drm_i915_gem_execbuffer2 *exec,
201361a5 3429 struct drm_clip_rect *cliprects,
673a394b
EA
3430 uint64_t exec_offset)
3431{
3432 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b
EA
3433 int nbox = exec->num_cliprects;
3434 int i = 0, count;
83d60795 3435 uint32_t exec_start, exec_len;
673a394b
EA
3436 RING_LOCALS;
3437
3438 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3439 exec_len = (uint32_t) exec->batch_len;
3440
8f0dc5bf 3441 trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
1c5d22f7 3442
673a394b
EA
3443 count = nbox ? nbox : 1;
3444
3445 for (i = 0; i < count; i++) {
3446 if (i < nbox) {
201361a5 3447 int ret = i915_emit_box(dev, cliprects, i,
673a394b
EA
3448 exec->DR1, exec->DR4);
3449 if (ret)
3450 return ret;
3451 }
3452
3453 if (IS_I830(dev) || IS_845G(dev)) {
3454 BEGIN_LP_RING(4);
3455 OUT_RING(MI_BATCH_BUFFER);
3456 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3457 OUT_RING(exec_start + exec_len - 4);
3458 OUT_RING(0);
3459 ADVANCE_LP_RING();
3460 } else {
3461 BEGIN_LP_RING(2);
3462 if (IS_I965G(dev)) {
3463 OUT_RING(MI_BATCH_BUFFER_START |
3464 (2 << 6) |
3465 MI_BATCH_NON_SECURE_I965);
3466 OUT_RING(exec_start);
3467 } else {
3468 OUT_RING(MI_BATCH_BUFFER_START |
3469 (2 << 6));
3470 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3471 }
3472 ADVANCE_LP_RING();
3473 }
3474 }
3475
3476 /* XXX breadcrumb */
3477 return 0;
3478}
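/* Ring usage above, summarized: 830/845 emit a bounded MI_BATCH_BUFFER
 * packet carrying both the start and end addresses of the batch, while
 * newer parts emit MI_BATCH_BUFFER_START with only the start address;
 * on 965 the non-secure flag travels in the packet header
 * (MI_BATCH_NON_SECURE_I965), elsewhere it is OR'd into the address.
 */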
3479
3480/* Throttle our rendering by waiting until the ring has completed our requests
3481 * emitted over 20 msec ago.
3482 *
b962442e
EA
3483 * Note that if we were to use the current jiffies each time around the loop,
3484 * we wouldn't escape the function with any frames outstanding if the time to
3485 * render a frame was over 20ms.
3486 *
3487 * This should get us reasonable parallelism between CPU and GPU but also
3488 * relatively low latency when blocking on a particular request to finish.
3489 */
3490static int
3491i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3492{
3493 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3494 int ret = 0;
3495 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3496
3497 mutex_lock(&dev->struct_mutex);
3498 while (!list_empty(&i915_file_priv->mm.request_list)) {
3499 struct drm_i915_gem_request *request;
3500
3501 request = list_first_entry(&i915_file_priv->mm.request_list,
3502 struct drm_i915_gem_request,
3503 client_list);
3504
3505 if (time_after_eq(request->emitted_jiffies, recent_enough))
3506 break;
3507
3508 ret = i915_wait_request(dev, request->seqno);
3509 if (ret != 0)
3510 break;
3511 }
3512 mutex_unlock(&dev->struct_mutex);
3513
3514 return ret;
3515}
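/* Hypothetical userspace sketch (not from this file): the throttle above is
 * reached through DRM_IOCTL_I915_GEM_THROTTLE, which takes no argument
 * structure. A client can call it once per frame to stay roughly 20 ms
 * behind the GPU without busy-waiting:
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int pace_frame(int drm_fd)
 *	{
 *		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
 *	}
 */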
3516
3517 static int
3518 i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
3519 uint32_t buffer_count,
3520 struct drm_i915_gem_relocation_entry **relocs)
3521{
3522 uint32_t reloc_count = 0, reloc_index = 0, i;
3523 int ret;
3524
3525 *relocs = NULL;
3526 for (i = 0; i < buffer_count; i++) {
3527 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3528 return -EINVAL;
3529 reloc_count += exec_list[i].relocation_count;
3530 }
3531
3532 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
3533 if (*relocs == NULL) {
3534 DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
3535 return -ENOMEM;
3536 }
3537
3538 for (i = 0; i < buffer_count; i++) {
3539 struct drm_i915_gem_relocation_entry __user *user_relocs;
3540
3541 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3542
3543 ret = copy_from_user(&(*relocs)[reloc_index],
3544 user_relocs,
3545 exec_list[i].relocation_count *
3546 sizeof(**relocs));
3547 if (ret != 0) {
3548 drm_free_large(*relocs);
3549 *relocs = NULL;
3550 return -EFAULT;
3551 }
3552
3553 reloc_index += exec_list[i].relocation_count;
3554 }
3555
3556 return 0;
3557}
3558
3559static int
3560 i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
3561 uint32_t buffer_count,
3562 struct drm_i915_gem_relocation_entry *relocs)
3563{
3564 uint32_t reloc_count = 0, i;
3565 int ret = 0;
3566
3567 if (relocs == NULL)
3568 return 0;
3569
3570 for (i = 0; i < buffer_count; i++) {
3571 struct drm_i915_gem_relocation_entry __user *user_relocs;
3572 int unwritten;
3573
3574 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3575
3576 unwritten = copy_to_user(user_relocs,
3577 &relocs[reloc_count],
3578 exec_list[i].relocation_count *
3579 sizeof(*relocs));
3580
3581 if (unwritten) {
3582 ret = -EFAULT;
3583 goto err;
3584 }
3585
3586 reloc_count += exec_list[i].relocation_count;
3587 }
3588
3589err:
3590 drm_free_large(relocs);
3591
3592 return ret;
3593}
3594
3595 static int
3596 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
3597 uint64_t exec_offset)
3598{
3599 uint32_t exec_start, exec_len;
3600
3601 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3602 exec_len = (uint32_t) exec->batch_len;
3603
3604 if ((exec_start | exec_len) & 0x7)
3605 return -EINVAL;
3606
3607 if (!exec_start)
3608 return -EINVAL;
3609
3610 return 0;
3611}
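/* Editor's note: OR-ing exec_start and exec_len before masking with 0x7
 * checks both values for 8-byte alignment in a single test; if either has
 * any of its low three bits set, so does the OR. A zero exec_start is also
 * rejected, since the batch must sit at a real GTT offset.
 */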
3612
3613static int
3614i915_gem_wait_for_pending_flip(struct drm_device *dev,
3615 struct drm_gem_object **object_list,
3616 int count)
3617{
3618 drm_i915_private_t *dev_priv = dev->dev_private;
3619 struct drm_i915_gem_object *obj_priv;
3620 DEFINE_WAIT(wait);
3621 int i, ret = 0;
3622
3623 for (;;) {
3624 prepare_to_wait(&dev_priv->pending_flip_queue,
3625 &wait, TASK_INTERRUPTIBLE);
3626 for (i = 0; i < count; i++) {
3627 obj_priv = object_list[i]->driver_private;
3628 if (atomic_read(&obj_priv->pending_flip) > 0)
3629 break;
3630 }
3631 if (i == count)
3632 break;
3633
3634 if (!signal_pending(current)) {
3635 mutex_unlock(&dev->struct_mutex);
3636 schedule();
3637 mutex_lock(&dev->struct_mutex);
3638 continue;
3639 }
3640 ret = -ERESTARTSYS;
3641 break;
3642 }
3643 finish_wait(&dev_priv->pending_flip_queue, &wait);
3644
3645 return ret;
3646}
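/* Editor's note: the loop above is an open-coded interruptible wait. It
 * cannot use wait_event_interruptible() directly because struct_mutex must
 * be dropped around schedule() and retaken before the pending_flip counts
 * are rechecked, so it spells out the prepare_to_wait()/schedule()/
 * finish_wait() sequence by hand.
 */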
3647
3648int
3649i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3650 struct drm_file *file_priv,
3651 struct drm_i915_gem_execbuffer2 *args,
3652 struct drm_i915_gem_exec_object2 *exec_list)
3653{
3654 drm_i915_private_t *dev_priv = dev->dev_private;
3655 struct drm_gem_object **object_list = NULL;
3656 struct drm_gem_object *batch_obj;
3657 struct drm_i915_gem_object *obj_priv;
3658 struct drm_clip_rect *cliprects = NULL;
3659 struct drm_i915_gem_relocation_entry *relocs = NULL;
3660 int ret = 0, ret2, i, pinned = 0;
3661 uint64_t exec_offset;
3662 uint32_t seqno, flush_domains, reloc_index;
3663 int pin_tries, flips;
3664
3665#if WATCH_EXEC
3666 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3667 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3668#endif
3669
3670 if (args->buffer_count < 1) {
3671 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3672 return -EINVAL;
3673 }
3674 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
3675 if (object_list == NULL) {
3676 DRM_ERROR("Failed to allocate object list for %d buffers\n",
3677 args->buffer_count);
3678 ret = -ENOMEM;
3679 goto pre_mutex_err;
3680 }
3681
3682 if (args->num_cliprects != 0) {
3683 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3684 GFP_KERNEL);
3685 if (cliprects == NULL) {
 ret = -ENOMEM;
3686 goto pre_mutex_err;
 }
3687
3688 ret = copy_from_user(cliprects,
3689 (struct drm_clip_rect __user *)
3690 (uintptr_t) args->cliprects_ptr,
3691 sizeof(*cliprects) * args->num_cliprects);
3692 if (ret != 0) {
3693 DRM_ERROR("copy %d cliprects failed: %d\n",
3694 args->num_cliprects, ret);
3695 goto pre_mutex_err;
3696 }
3697 }
3698
3699 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3700 &relocs);
3701 if (ret != 0)
3702 goto pre_mutex_err;
3703
3704 mutex_lock(&dev->struct_mutex);
3705
3706 i915_verify_inactive(dev, __FILE__, __LINE__);
3707
3708 if (atomic_read(&dev_priv->mm.wedged)) {
3709 mutex_unlock(&dev->struct_mutex);
3710 ret = -EIO;
3711 goto pre_mutex_err;
3712 }
3713
3714 if (dev_priv->mm.suspended) {
3715 mutex_unlock(&dev->struct_mutex);
3716 ret = -EBUSY;
3717 goto pre_mutex_err;
3718 }
3719
3720 /* Look up object handles */
3721 flips = 0;
3722 for (i = 0; i < args->buffer_count; i++) {
3723 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3724 exec_list[i].handle);
3725 if (object_list[i] == NULL) {
3726 DRM_ERROR("Invalid object handle %d at index %d\n",
3727 exec_list[i].handle, i);
3728 /* prevent error path from reading uninitialized data */
3729 args->buffer_count = i + 1;
3730 ret = -EBADF;
3731 goto err;
3732 }
3733
3734 obj_priv = object_list[i]->driver_private;
3735 if (obj_priv->in_execbuffer) {
3736 DRM_ERROR("Object %p appears more than once in object list\n",
3737 object_list[i]);
3738 /* prevent error path from reading uninitialized data */
3739 args->buffer_count = i + 1;
3740 ret = -EBADF;
3741 goto err;
3742 }
3743 obj_priv->in_execbuffer = true;
3744 flips += atomic_read(&obj_priv->pending_flip);
3745 }
3746
3747 if (flips > 0) {
3748 ret = i915_gem_wait_for_pending_flip(dev, object_list,
3749 args->buffer_count);
3750 if (ret)
3751 goto err;
3752 }
3753
3754 /* Pin and relocate */
3755 for (pin_tries = 0; ; pin_tries++) {
3756 ret = 0;
3757 reloc_index = 0;
3758
3759 for (i = 0; i < args->buffer_count; i++) {
3760 object_list[i]->pending_read_domains = 0;
3761 object_list[i]->pending_write_domain = 0;
3762 ret = i915_gem_object_pin_and_relocate(object_list[i],
3763 file_priv,
3764 &exec_list[i],
3765 &relocs[reloc_index]);
3766 if (ret)
3767 break;
3768 pinned = i + 1;
3769 reloc_index += exec_list[i].relocation_count;
3770 }
3771 /* success */
3772 if (ret == 0)
3773 break;
3774
3775 /* error other than GTT full, or we've already tried again */
3776 if (ret != -ENOSPC || pin_tries >= 1) {
3777 if (ret != -ERESTARTSYS) {
3778 unsigned long long total_size = 0;
3779 for (i = 0; i < args->buffer_count; i++)
3780 total_size += object_list[i]->size;
3781 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
3782 pinned+1, args->buffer_count,
3783 total_size, ret);
3784 DRM_ERROR("%d objects [%d pinned], "
3785 "%d object bytes [%d pinned], "
3786 "%d/%d gtt bytes\n",
3787 atomic_read(&dev->object_count),
3788 atomic_read(&dev->pin_count),
3789 atomic_read(&dev->object_memory),
3790 atomic_read(&dev->pin_memory),
3791 atomic_read(&dev->gtt_memory),
3792 dev->gtt_total);
3793 }
3794 goto err;
3795 }
3796
3797 /* unpin all of our buffers */
3798 for (i = 0; i < pinned; i++)
3799 i915_gem_object_unpin(object_list[i]);
3800 pinned = 0;
3801
3802 /* evict everyone we can from the aperture */
3803 ret = i915_gem_evict_everything(dev);
3804 if (ret && ret != -ENOSPC)
3805 goto err;
3806 }
3807
3808 /* Set the pending read domains for the batch buffer to COMMAND */
3809 batch_obj = object_list[args->buffer_count-1];
3810 if (batch_obj->pending_write_domain) {
3811 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3812 ret = -EINVAL;
3813 goto err;
3814 }
3815 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
3816
3817 /* Sanity check the batch buffer, prior to moving objects */
3818 exec_offset = exec_list[args->buffer_count - 1].offset;
3819 ret = i915_gem_check_execbuffer(args, exec_offset);
3820 if (ret != 0) {
3821 DRM_ERROR("execbuf with invalid offset/length\n");
3822 goto err;
3823 }
3824
3825 i915_verify_inactive(dev, __FILE__, __LINE__);
3826
3827 /* Zero the global flush/invalidate flags. These
3828 * will be modified as new domains are computed
3829 * for each object
3830 */
3831 dev->invalidate_domains = 0;
3832 dev->flush_domains = 0;
3833
3834 for (i = 0; i < args->buffer_count; i++) {
3835 struct drm_gem_object *obj = object_list[i];
3836
3837 /* Compute new gpu domains and update invalidate/flush */
3838 i915_gem_object_set_to_gpu_domain(obj);
3839 }
3840
3841 i915_verify_inactive(dev, __FILE__, __LINE__);
3842
3843 if (dev->invalidate_domains | dev->flush_domains) {
3844#if WATCH_EXEC
3845 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3846 __func__,
3847 dev->invalidate_domains,
3848 dev->flush_domains);
3849#endif
3850 i915_gem_flush(dev,
3851 dev->invalidate_domains,
3852 dev->flush_domains);
3853 if (dev->flush_domains)
3854 (void)i915_add_request(dev, file_priv,
3855 dev->flush_domains);
3856 }
3857
3858 for (i = 0; i < args->buffer_count; i++) {
3859 struct drm_gem_object *obj = object_list[i];
3860 uint32_t old_write_domain = obj->write_domain;
3861
3862 obj->write_domain = obj->pending_write_domain;
3863 trace_i915_gem_object_change_domain(obj,
3864 obj->read_domains,
3865 old_write_domain);
3866 }
3867
3868 i915_verify_inactive(dev, __FILE__, __LINE__);
3869
3870#if WATCH_COHERENCY
3871 for (i = 0; i < args->buffer_count; i++) {
3872 i915_gem_object_check_coherency(object_list[i],
3873 exec_list[i].handle);
3874 }
3875#endif
3876
3877#if WATCH_EXEC
3878 i915_gem_dump_object(batch_obj,
3879 args->batch_len,
3880 __func__,
3881 ~0);
3882#endif
3883
3884 /* Exec the batchbuffer */
3885 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
3886 if (ret) {
3887 DRM_ERROR("dispatch failed %d\n", ret);
3888 goto err;
3889 }
3890
3891 /*
3892 * Ensure that the commands in the batch buffer are
3893 * finished before the interrupt fires
3894 */
3895 flush_domains = i915_retire_commands(dev);
3896
3897 i915_verify_inactive(dev, __FILE__, __LINE__);
3898
3899 /*
3900 * Get a seqno representing the execution of the current buffer,
3901 * which we can wait on. We would like to mitigate these interrupts,
3902 * likely by only creating seqnos occasionally (so that we have
3903 * *some* interrupts representing completion of buffers that we can
3904 * wait on when trying to clear up gtt space).
3905 */
3906 seqno = i915_add_request(dev, file_priv, flush_domains);
3907 BUG_ON(seqno == 0);
3908 for (i = 0; i < args->buffer_count; i++) {
3909 struct drm_gem_object *obj = object_list[i];
3910
3911 i915_gem_object_move_to_active(obj, seqno);
3912#if WATCH_LRU
3913 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3914#endif
3915 }
3916#if WATCH_LRU
3917 i915_dump_lru(dev, __func__);
3918#endif
3919
3920 i915_verify_inactive(dev, __FILE__, __LINE__);
3921
3922err:
3923 for (i = 0; i < pinned; i++)
3924 i915_gem_object_unpin(object_list[i]);
3925
3926 for (i = 0; i < args->buffer_count; i++) {
3927 if (object_list[i]) {
3928 obj_priv = object_list[i]->driver_private;
3929 obj_priv->in_execbuffer = false;
3930 }
3931 drm_gem_object_unreference(object_list[i]);
3932 }
3933
3934 mutex_unlock(&dev->struct_mutex);
3935
3936pre_mutex_err:
3937 /* Copy the updated relocations out regardless of current error
3938 * state. Failure to update the relocs would mean that the next
3939 * time userland calls execbuf, it would do so with presumed offset
3940 * state that didn't match the actual object state.
3941 */
3942 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3943 relocs);
3944 if (ret2 != 0) {
3945 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3946
3947 if (ret == 0)
3948 ret = ret2;
3949 }
3950
3951 drm_free_large(object_list);
3952 kfree(cliprects);
3953
3954 return ret;
3955}
3956
3957/*
3958 * Legacy execbuffer just creates an exec2 list from the original exec object
3959 * list array and passes it to the real function.
3960 */
3961int
3962i915_gem_execbuffer(struct drm_device *dev, void *data,
3963 struct drm_file *file_priv)
3964{
3965 struct drm_i915_gem_execbuffer *args = data;
3966 struct drm_i915_gem_execbuffer2 exec2;
3967 struct drm_i915_gem_exec_object *exec_list = NULL;
3968 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
3969 int ret, i;
3970
3971#if WATCH_EXEC
3972 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3973 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3974#endif
3975
3976 if (args->buffer_count < 1) {
3977 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3978 return -EINVAL;
3979 }
3980
3981 /* Copy in the exec list from userland */
3982 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
3983 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
3984 if (exec_list == NULL || exec2_list == NULL) {
3985 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
3986 args->buffer_count);
3987 drm_free_large(exec_list);
3988 drm_free_large(exec2_list);
3989 return -ENOMEM;
3990 }
3991 ret = copy_from_user(exec_list,
3992 (struct drm_i915_relocation_entry __user *)
3993 (uintptr_t) args->buffers_ptr,
3994 sizeof(*exec_list) * args->buffer_count);
3995 if (ret != 0) {
3996 DRM_ERROR("copy %d exec entries failed %d\n",
3997 args->buffer_count, ret);
3998 drm_free_large(exec_list);
3999 drm_free_large(exec2_list);
4000 return -EFAULT;
4001 }
4002
4003 for (i = 0; i < args->buffer_count; i++) {
4004 exec2_list[i].handle = exec_list[i].handle;
4005 exec2_list[i].relocation_count = exec_list[i].relocation_count;
4006 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
4007 exec2_list[i].alignment = exec_list[i].alignment;
4008 exec2_list[i].offset = exec_list[i].offset;
4009 if (!IS_I965G(dev))
4010 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
4011 else
4012 exec2_list[i].flags = 0;
4013 }
4014
4015 exec2.buffers_ptr = args->buffers_ptr;
4016 exec2.buffer_count = args->buffer_count;
4017 exec2.batch_start_offset = args->batch_start_offset;
4018 exec2.batch_len = args->batch_len;
4019 exec2.DR1 = args->DR1;
4020 exec2.DR4 = args->DR4;
4021 exec2.num_cliprects = args->num_cliprects;
4022 exec2.cliprects_ptr = args->cliprects_ptr;
4023 exec2.flags = 0;
4024
4025 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
4026 if (!ret) {
4027 /* Copy the new buffer offsets back to the user's exec list. */
4028 for (i = 0; i < args->buffer_count; i++)
4029 exec_list[i].offset = exec2_list[i].offset;
4030 /* ... and back out to userspace */
4031 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4032 (uintptr_t) args->buffers_ptr,
4033 exec_list,
4034 sizeof(*exec_list) * args->buffer_count);
4035 if (ret) {
4036 ret = -EFAULT;
4037 DRM_ERROR("failed to copy %d exec entries "
4038 "back to user (%d)\n",
4039 args->buffer_count, ret);
4040 }
4041 }
4042
4043 drm_free_large(exec_list);
4044 drm_free_large(exec2_list);
4045 return ret;
4046}
4047
4048int
4049i915_gem_execbuffer2(struct drm_device *dev, void *data,
4050 struct drm_file *file_priv)
4051{
4052 struct drm_i915_gem_execbuffer2 *args = data;
4053 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4054 int ret;
4055
4056#if WATCH_EXEC
4057 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4058 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4059#endif
4060
4061 if (args->buffer_count < 1) {
4062 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4063 return -EINVAL;
4064 }
4065
4066 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4067 if (exec2_list == NULL) {
4068 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4069 args->buffer_count);
4070 return -ENOMEM;
4071 }
4072 ret = copy_from_user(exec2_list,
4073 (struct drm_i915_relocation_entry __user *)
4074 (uintptr_t) args->buffers_ptr,
4075 sizeof(*exec2_list) * args->buffer_count);
4076 if (ret != 0) {
4077 DRM_ERROR("copy %d exec entries failed %d\n",
4078 args->buffer_count, ret);
4079 drm_free_large(exec2_list);
4080 return -EFAULT;
4081 }
4082
4083 ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4084 if (!ret) {
4085 /* Copy the new buffer offsets back to the user's exec list. */
4086 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4087 (uintptr_t) args->buffers_ptr,
4088 exec2_list,
4089 sizeof(*exec2_list) * args->buffer_count);
4090 if (ret) {
4091 ret = -EFAULT;
4092 DRM_ERROR("failed to copy %d exec entries "
4093 "back to user (%d)\n",
4094 args->buffer_count, ret);
4095 }
4096 }
4097
4098 drm_free_large(exec2_list);
4099 return ret;
4100}
4101
4102int
4103i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4104{
4105 struct drm_device *dev = obj->dev;
4106 struct drm_i915_gem_object *obj_priv = obj->driver_private;
4107 int ret;
4108
4109 i915_verify_inactive(dev, __FILE__, __LINE__);
4110 if (obj_priv->gtt_space == NULL) {
4111 ret = i915_gem_object_bind_to_gtt(obj, alignment);
4112 if (ret)
4113 return ret;
4114 }
4115
4116 obj_priv->pin_count++;
4117
4118 /* If the object is not active and not pending a flush,
4119 * remove it from the inactive list
4120 */
4121 if (obj_priv->pin_count == 1) {
4122 atomic_inc(&dev->pin_count);
4123 atomic_add(obj->size, &dev->pin_memory);
4124 if (!obj_priv->active &&
4125 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
4126 !list_empty(&obj_priv->list))
4127 list_del_init(&obj_priv->list);
4128 }
4129 i915_verify_inactive(dev, __FILE__, __LINE__);
4130
4131 return 0;
4132}
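/* Editor's sketch of the usual calling pattern (illustrative only;
 * use_gtt_address is a hypothetical helper): pin an object while the
 * hardware needs a stable GTT address, then unpin so the eviction code may
 * move it again. The ringbuffer setup later in this file follows exactly
 * this shape:
 *
 *	ret = i915_gem_object_pin(obj, 4096);
 *	if (ret)
 *		return ret;
 *	use_gtt_address(obj_priv->gtt_offset);
 *	i915_gem_object_unpin(obj);
 */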
4133
4134void
4135i915_gem_object_unpin(struct drm_gem_object *obj)
4136{
4137 struct drm_device *dev = obj->dev;
4138 drm_i915_private_t *dev_priv = dev->dev_private;
4139 struct drm_i915_gem_object *obj_priv = obj->driver_private;
4140
4141 i915_verify_inactive(dev, __FILE__, __LINE__);
4142 obj_priv->pin_count--;
4143 BUG_ON(obj_priv->pin_count < 0);
4144 BUG_ON(obj_priv->gtt_space == NULL);
4145
4146 /* If the object is no longer pinned, and is
4147 * neither active nor being flushed, then stick it on
4148 * the inactive list
4149 */
4150 if (obj_priv->pin_count == 0) {
4151 if (!obj_priv->active &&
4152 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
4153 list_move_tail(&obj_priv->list,
4154 &dev_priv->mm.inactive_list);
4155 atomic_dec(&dev->pin_count);
4156 atomic_sub(obj->size, &dev->pin_memory);
4157 }
4158 i915_verify_inactive(dev, __FILE__, __LINE__);
4159}
4160
4161int
4162i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4163 struct drm_file *file_priv)
4164{
4165 struct drm_i915_gem_pin *args = data;
4166 struct drm_gem_object *obj;
4167 struct drm_i915_gem_object *obj_priv;
4168 int ret;
4169
4170 mutex_lock(&dev->struct_mutex);
4171
4172 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4173 if (obj == NULL) {
4174 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
4175 args->handle);
4176 mutex_unlock(&dev->struct_mutex);
4177 return -EBADF;
4178 }
4179 obj_priv = obj->driver_private;
4180
4181 if (obj_priv->madv != I915_MADV_WILLNEED) {
4182 DRM_ERROR("Attempting to pin a purgeable buffer\n");
4183 drm_gem_object_unreference(obj);
4184 mutex_unlock(&dev->struct_mutex);
4185 return -EINVAL;
4186 }
4187
4188 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
4189 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
4190 args->handle);
4191 drm_gem_object_unreference(obj);
4192 mutex_unlock(&dev->struct_mutex);
4193 return -EINVAL;
4194 }
4195
4196 obj_priv->user_pin_count++;
4197 obj_priv->pin_filp = file_priv;
4198 if (obj_priv->user_pin_count == 1) {
4199 ret = i915_gem_object_pin(obj, args->alignment);
4200 if (ret != 0) {
4201 drm_gem_object_unreference(obj);
4202 mutex_unlock(&dev->struct_mutex);
4203 return ret;
4204 }
4205 }
4206
4207 /* XXX - flush the CPU caches for pinned objects
4208 * as the X server doesn't manage domains yet
4209 */
4210 i915_gem_object_flush_cpu_write_domain(obj);
4211 args->offset = obj_priv->gtt_offset;
4212 drm_gem_object_unreference(obj);
4213 mutex_unlock(&dev->struct_mutex);
4214
4215 return 0;
4216}
4217
4218int
4219i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4220 struct drm_file *file_priv)
4221{
4222 struct drm_i915_gem_pin *args = data;
4223 struct drm_gem_object *obj;
4224 struct drm_i915_gem_object *obj_priv;
4225
4226 mutex_lock(&dev->struct_mutex);
4227
4228 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4229 if (obj == NULL) {
4230 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
4231 args->handle);
4232 mutex_unlock(&dev->struct_mutex);
4233 return -EBADF;
4234 }
4235
4236 obj_priv = obj->driver_private;
4237 if (obj_priv->pin_filp != file_priv) {
4238 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4239 args->handle);
4240 drm_gem_object_unreference(obj);
4241 mutex_unlock(&dev->struct_mutex);
4242 return -EINVAL;
4243 }
4244 obj_priv->user_pin_count--;
4245 if (obj_priv->user_pin_count == 0) {
4246 obj_priv->pin_filp = NULL;
4247 i915_gem_object_unpin(obj);
4248 }
4249
4250 drm_gem_object_unreference(obj);
4251 mutex_unlock(&dev->struct_mutex);
4252 return 0;
4253}
4254
4255int
4256i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4257 struct drm_file *file_priv)
4258{
4259 struct drm_i915_gem_busy *args = data;
4260 struct drm_gem_object *obj;
4261 struct drm_i915_gem_object *obj_priv;
4262
4263 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4264 if (obj == NULL) {
4265 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
4266 args->handle);
4267 return -EBADF;
4268 }
4269
4270 mutex_lock(&dev->struct_mutex);
4271 /* Update the active list for the hardware's current position.
4272 * Otherwise this only updates on a delayed timer or when irqs are
4273 * actually unmasked, and our working set ends up being larger than
4274 * required.
4275 */
4276 i915_gem_retire_requests(dev);
4277
4278 obj_priv = obj->driver_private;
4279 /* Don't count being on the flushing list against the object being
4280 * done. Otherwise, a buffer left on the flushing list but not getting
4281 * flushed (because nobody's flushing that domain) won't ever return
4282 * unbusy and get reused by libdrm's bo cache. The other expected
4283 * consumer of this interface, OpenGL's occlusion queries, also specs
4284 * that the objects get unbusy "eventually" without any interference.
4285 */
4286 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
4287
4288 drm_gem_object_unreference(obj);
4289 mutex_unlock(&dev->struct_mutex);
4290 return 0;
4291}
4292
4293int
4294i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4295 struct drm_file *file_priv)
4296{
4297 return i915_gem_ring_throttle(dev, file_priv);
4298}
4299
4300int
4301i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4302 struct drm_file *file_priv)
4303{
4304 struct drm_i915_gem_madvise *args = data;
4305 struct drm_gem_object *obj;
4306 struct drm_i915_gem_object *obj_priv;
4307
4308 switch (args->madv) {
4309 case I915_MADV_DONTNEED:
4310 case I915_MADV_WILLNEED:
4311 break;
4312 default:
4313 return -EINVAL;
4314 }
4315
4316 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4317 if (obj == NULL) {
4318 DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
4319 args->handle);
4320 return -EBADF;
4321 }
4322
4323 mutex_lock(&dev->struct_mutex);
4324 obj_priv = obj->driver_private;
4325
4326 if (obj_priv->pin_count) {
4327 drm_gem_object_unreference(obj);
4328 mutex_unlock(&dev->struct_mutex);
4329
4330 DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
4331 return -EINVAL;
4332 }
4333
4334 if (obj_priv->madv != __I915_MADV_PURGED)
4335 obj_priv->madv = args->madv;
4336
4337 /* if the object is no longer bound, discard its backing storage */
4338 if (i915_gem_object_is_purgeable(obj_priv) &&
4339 obj_priv->gtt_space == NULL)
4340 i915_gem_object_truncate(obj);
4341
4342 args->retained = obj_priv->madv != __I915_MADV_PURGED;
4343
4344 drm_gem_object_unreference(obj);
4345 mutex_unlock(&dev->struct_mutex);
4346
4347 return 0;
4348}
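/* Hypothetical userspace sketch (not from this file): a buffer cache can
 * mark idle buffers purgeable and, on reuse, learn whether the kernel
 * discarded them. Field names match struct drm_i915_gem_madvise; the
 * reupload step is the caller's own logic.
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = bo_handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 *	madv.madv = I915_MADV_WILLNEED;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		... reupload the buffer contents ...
 */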
4349
4350int i915_gem_init_object(struct drm_gem_object *obj)
4351{
4352 struct drm_i915_gem_object *obj_priv;
4353
4354 obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
4355 if (obj_priv == NULL)
4356 return -ENOMEM;
4357
4358 /*
4359 * We've just allocated pages from the kernel,
4360 * so they've just been written by the CPU with
4361 * zeros. They'll need to be clflushed before we
4362 * use them with the GPU.
4363 */
4364 obj->write_domain = I915_GEM_DOMAIN_CPU;
4365 obj->read_domains = I915_GEM_DOMAIN_CPU;
4366
4367 obj_priv->agp_type = AGP_USER_MEMORY;
4368
4369 obj->driver_private = obj_priv;
4370 obj_priv->obj = obj;
4371 obj_priv->fence_reg = I915_FENCE_REG_NONE;
4372 INIT_LIST_HEAD(&obj_priv->list);
4373 INIT_LIST_HEAD(&obj_priv->fence_list);
4374 obj_priv->madv = I915_MADV_WILLNEED;
4375
4376 trace_i915_gem_object_create(obj);
4377
4378 return 0;
4379}
4380
4381void i915_gem_free_object(struct drm_gem_object *obj)
4382{
4383 struct drm_device *dev = obj->dev;
4384 struct drm_i915_gem_object *obj_priv = obj->driver_private;
4385
4386 trace_i915_gem_object_destroy(obj);
4387
4388 while (obj_priv->pin_count > 0)
4389 i915_gem_object_unpin(obj);
4390
4391 if (obj_priv->phys_obj)
4392 i915_gem_detach_phys_object(dev, obj);
4393
4394 i915_gem_object_unbind(obj);
4395
4396 if (obj_priv->mmap_offset)
4397 i915_gem_free_mmap_offset(obj);
4398
4399 kfree(obj_priv->page_cpu_valid);
4400 kfree(obj_priv->bit_17);
4401 kfree(obj->driver_private);
4402}
4403
4404 /** Unbinds all inactive objects. */
4405 static int
4406 i915_gem_evict_from_inactive_list(struct drm_device *dev)
4407 {
4408 drm_i915_private_t *dev_priv = dev->dev_private;
4409
4410 while (!list_empty(&dev_priv->mm.inactive_list)) {
4411 struct drm_gem_object *obj;
4412 int ret;
4413
4414 obj = list_first_entry(&dev_priv->mm.inactive_list,
4415 struct drm_i915_gem_object,
4416 list)->obj;
4417
4418 ret = i915_gem_object_unbind(obj);
4419 if (ret != 0) {
4420 DRM_ERROR("Error unbinding object: %d\n", ret);
4421 return ret;
4422 }
4423 }
4424
4425 return 0;
4426}
4427
4428 int
4429i915_gem_idle(struct drm_device *dev)
4430{
4431 drm_i915_private_t *dev_priv = dev->dev_private;
4432 uint32_t seqno, cur_seqno, last_seqno;
4433 int stuck, ret;
4434
4435 mutex_lock(&dev->struct_mutex);
4436
4437 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
4438 mutex_unlock(&dev->struct_mutex);
4439 return 0;
4440 }
4441
4442 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4443 * We need to replace this with a semaphore, or something.
4444 */
4445 dev_priv->mm.suspended = 1;
4446 del_timer(&dev_priv->hangcheck_timer);
4447
4448 /* Cancel the retire work handler, wait for it to finish if running
4449 */
4450 mutex_unlock(&dev->struct_mutex);
4451 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4452 mutex_lock(&dev->struct_mutex);
4453
4454 i915_kernel_lost_context(dev);
4455
4456 /* Flush the GPU along with all non-CPU write domains
4457 */
4458 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
4459 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
4460
4461 if (seqno == 0) {
4462 mutex_unlock(&dev->struct_mutex);
4463 return -ENOMEM;
4464 }
4465
4466 dev_priv->mm.waiting_gem_seqno = seqno;
4467 last_seqno = 0;
4468 stuck = 0;
4469 for (;;) {
4470 cur_seqno = i915_get_gem_seqno(dev);
4471 if (i915_seqno_passed(cur_seqno, seqno))
4472 break;
4473 if (last_seqno == cur_seqno) {
4474 if (stuck++ > 100) {
4475 DRM_ERROR("hardware wedged\n");
4476 atomic_set(&dev_priv->mm.wedged, 1);
4477 DRM_WAKEUP(&dev_priv->irq_queue);
4478 break;
4479 }
4480 }
4481 msleep(10);
4482 last_seqno = cur_seqno;
4483 }
4484 dev_priv->mm.waiting_gem_seqno = 0;
4485
4486 i915_gem_retire_requests(dev);
4487
4488 spin_lock(&dev_priv->mm.active_list_lock);
4489 if (!atomic_read(&dev_priv->mm.wedged)) {
4490 /* Active and flushing should now be empty as we've
4491 * waited for a sequence higher than any pending execbuffer
4492 */
4493 WARN_ON(!list_empty(&dev_priv->mm.active_list));
4494 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
4495 /* Request should now be empty as we've also waited
4496 * for the last request in the list
4497 */
4498 WARN_ON(!list_empty(&dev_priv->mm.request_list));
4499 }
4500
4501 /* Empty the active and flushing lists to inactive. If there's
4502 * anything left at this point, it means that we're wedged and
4503 * nothing good's going to happen by leaving them there. So strip
4504 * the GPU domains and just stuff them onto inactive.
4505 */
4506 while (!list_empty(&dev_priv->mm.active_list)) {
4507 struct drm_gem_object *obj;
4508 uint32_t old_write_domain;
4509
4510 obj = list_first_entry(&dev_priv->mm.active_list,
4511 struct drm_i915_gem_object,
4512 list)->obj;
4513 old_write_domain = obj->write_domain;
4514 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4515 i915_gem_object_move_to_inactive(obj);
4516
4517 trace_i915_gem_object_change_domain(obj,
4518 obj->read_domains,
4519 old_write_domain);
4520 }
4521 spin_unlock(&dev_priv->mm.active_list_lock);
4522
4523 while (!list_empty(&dev_priv->mm.flushing_list)) {
4524 struct drm_gem_object *obj;
4525 uint32_t old_write_domain;
4526
4527 obj = list_first_entry(&dev_priv->mm.flushing_list,
4528 struct drm_i915_gem_object,
4529 list)->obj;
4530 old_write_domain = obj->write_domain;
4531 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4532 i915_gem_object_move_to_inactive(obj);
4533
4534 trace_i915_gem_object_change_domain(obj,
4535 obj->read_domains,
4536 old_write_domain);
4537 }
4538
4539
4540 /* Move all inactive buffers out of the GTT. */
4541 ret = i915_gem_evict_from_inactive_list(dev);
4542 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
4543 if (ret) {
4544 mutex_unlock(&dev->struct_mutex);
4545 return ret;
4546 }
4547
4548 i915_gem_cleanup_ringbuffer(dev);
4549 mutex_unlock(&dev->struct_mutex);
4550
4551 return 0;
4552}
4553
4554static int
4555i915_gem_init_hws(struct drm_device *dev)
4556{
4557 drm_i915_private_t *dev_priv = dev->dev_private;
4558 struct drm_gem_object *obj;
4559 struct drm_i915_gem_object *obj_priv;
4560 int ret;
4561
4562 /* If we need a physical address for the status page, it's already
4563 * initialized at driver load time.
4564 */
4565 if (!I915_NEED_GFX_HWS(dev))
4566 return 0;
4567
4568 obj = drm_gem_object_alloc(dev, 4096);
4569 if (obj == NULL) {
4570 DRM_ERROR("Failed to allocate status page\n");
4571 return -ENOMEM;
4572 }
4573 obj_priv = obj->driver_private;
4574 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4575
4576 ret = i915_gem_object_pin(obj, 4096);
4577 if (ret != 0) {
4578 drm_gem_object_unreference(obj);
4579 return ret;
4580 }
4581
4582 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
4583
4584 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
4585 if (dev_priv->hw_status_page == NULL) {
4586 DRM_ERROR("Failed to map status page.\n");
4587 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4588 i915_gem_object_unpin(obj);
4589 drm_gem_object_unreference(obj);
4590 return -EINVAL;
4591 }
4592 dev_priv->hws_obj = obj;
4593 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4594 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
4595 I915_READ(HWS_PGA); /* posting read */
4596 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4597
4598 return 0;
4599}
4600
4601static void
4602i915_gem_cleanup_hws(struct drm_device *dev)
4603{
4604 drm_i915_private_t *dev_priv = dev->dev_private;
4605 struct drm_gem_object *obj;
4606 struct drm_i915_gem_object *obj_priv;
4607
4608 if (dev_priv->hws_obj == NULL)
4609 return;
4610
4611 obj = dev_priv->hws_obj;
4612 obj_priv = obj->driver_private;
4613
4614 kunmap(obj_priv->pages[0]);
4615 i915_gem_object_unpin(obj);
4616 drm_gem_object_unreference(obj);
4617 dev_priv->hws_obj = NULL;
4618
4619 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4620 dev_priv->hw_status_page = NULL;
4621
4622 /* Write high address into HWS_PGA when disabling. */
4623 I915_WRITE(HWS_PGA, 0x1ffff000);
4624}
4625
4626 int
4627i915_gem_init_ringbuffer(struct drm_device *dev)
4628{
4629 drm_i915_private_t *dev_priv = dev->dev_private;
4630 struct drm_gem_object *obj;
4631 struct drm_i915_gem_object *obj_priv;
4632 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
4633 int ret;
4634 u32 head;
4635
4636 ret = i915_gem_init_hws(dev);
4637 if (ret != 0)
4638 return ret;
4639
4640 obj = drm_gem_object_alloc(dev, 128 * 1024);
4641 if (obj == NULL) {
4642 DRM_ERROR("Failed to allocate ringbuffer\n");
4643 i915_gem_cleanup_hws(dev);
4644 return -ENOMEM;
4645 }
4646 obj_priv = obj->driver_private;
4647
4648 ret = i915_gem_object_pin(obj, 4096);
4649 if (ret != 0) {
4650 drm_gem_object_unreference(obj);
4651 i915_gem_cleanup_hws(dev);
4652 return ret;
4653 }
4654
4655 /* Set up the kernel mapping for the ring. */
4656 ring->Size = obj->size;
4657
4658 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
4659 ring->map.size = obj->size;
4660 ring->map.type = 0;
4661 ring->map.flags = 0;
4662 ring->map.mtrr = 0;
4663
4664 drm_core_ioremap_wc(&ring->map, dev);
4665 if (ring->map.handle == NULL) {
4666 DRM_ERROR("Failed to map ringbuffer.\n");
4667 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4668 i915_gem_object_unpin(obj);
4669 drm_gem_object_unreference(obj);
4670 i915_gem_cleanup_hws(dev);
4671 return -EINVAL;
4672 }
4673 ring->ring_obj = obj;
4674 ring->virtual_start = ring->map.handle;
4675
4676 /* Stop the ring if it's running. */
4677 I915_WRITE(PRB0_CTL, 0);
4678 I915_WRITE(PRB0_TAIL, 0);
4679 I915_WRITE(PRB0_HEAD, 0);
4680
4681 /* Initialize the ring. */
4682 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
4683 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4684
4685 /* G45 ring initialization fails to reset head to zero */
4686 if (head != 0) {
4687 DRM_ERROR("Ring head not reset to zero "
4688 "ctl %08x head %08x tail %08x start %08x\n",
4689 I915_READ(PRB0_CTL),
4690 I915_READ(PRB0_HEAD),
4691 I915_READ(PRB0_TAIL),
4692 I915_READ(PRB0_START));
4693 I915_WRITE(PRB0_HEAD, 0);
4694
4695 DRM_ERROR("Ring head forced to zero "
4696 "ctl %08x head %08x tail %08x start %08x\n",
4697 I915_READ(PRB0_CTL),
4698 I915_READ(PRB0_HEAD),
4699 I915_READ(PRB0_TAIL),
4700 I915_READ(PRB0_START));
4701 }
4702
4703 I915_WRITE(PRB0_CTL,
4704 ((obj->size - 4096) & RING_NR_PAGES) |
4705 RING_NO_REPORT |
4706 RING_VALID);
4707
4708 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4709
4710 /* If the head is still not zero, the ring is dead */
4711 if (head != 0) {
4712 DRM_ERROR("Ring initialization failed "
4713 "ctl %08x head %08x tail %08x start %08x\n",
4714 I915_READ(PRB0_CTL),
4715 I915_READ(PRB0_HEAD),
4716 I915_READ(PRB0_TAIL),
4717 I915_READ(PRB0_START));
4718 return -EIO;
4719 }
4720
4721 /* Update our cache of the ring state */
4722 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4723 i915_kernel_lost_context(dev);
4724 else {
4725 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4726 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
4727 ring->space = ring->head - (ring->tail + 8);
4728 if (ring->space < 0)
4729 ring->space += ring->Size;
4730 }
4731
4732 return 0;
4733}
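/* Editor's sketch (illustrative only): the free-space computation above
 * treats the ring as a circular buffer and keeps 8 bytes of slack so the
 * tail never catches the head. A standalone form of the same arithmetic:
 *
 *	static int ring_space(u32 head, u32 tail, u32 size)
 *	{
 *		int space = head - (tail + 8);
 *		if (space < 0)
 *			space += size;
 *		return space;
 *	}
 */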
4734
4735 void
4736i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4737{
4738 drm_i915_private_t *dev_priv = dev->dev_private;
4739
4740 if (dev_priv->ring.ring_obj == NULL)
4741 return;
4742
4743 drm_core_ioremapfree(&dev_priv->ring.map, dev);
4744
4745 i915_gem_object_unpin(dev_priv->ring.ring_obj);
4746 drm_gem_object_unreference(dev_priv->ring.ring_obj);
4747 dev_priv->ring.ring_obj = NULL;
4748 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4749
4750 i915_gem_cleanup_hws(dev);
4751}
4752
4753int
4754i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4755 struct drm_file *file_priv)
4756{
4757 drm_i915_private_t *dev_priv = dev->dev_private;
4758 int ret;
4759
4760 if (drm_core_check_feature(dev, DRIVER_MODESET))
4761 return 0;
4762
4763 if (atomic_read(&dev_priv->mm.wedged)) {
4764 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4765 atomic_set(&dev_priv->mm.wedged, 0);
4766 }
4767
4768 mutex_lock(&dev->struct_mutex);
4769 dev_priv->mm.suspended = 0;
4770
4771 ret = i915_gem_init_ringbuffer(dev);
4772 if (ret != 0) {
4773 mutex_unlock(&dev->struct_mutex);
4774 return ret;
4775 }
4776
4777 spin_lock(&dev_priv->mm.active_list_lock);
4778 BUG_ON(!list_empty(&dev_priv->mm.active_list));
4779 spin_unlock(&dev_priv->mm.active_list_lock);
4780
4781 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4782 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4783 BUG_ON(!list_empty(&dev_priv->mm.request_list));
4784 mutex_unlock(&dev->struct_mutex);
4785
4786 drm_irq_install(dev);
4787
4788 return 0;
4789}
4790
4791int
4792i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4793 struct drm_file *file_priv)
4794{
4795 if (drm_core_check_feature(dev, DRIVER_MODESET))
4796 return 0;
4797
4798 drm_irq_uninstall(dev);
4799 return i915_gem_idle(dev);
4800}
4801
4802void
4803i915_gem_lastclose(struct drm_device *dev)
4804{
4805 int ret;
4806
4807 if (drm_core_check_feature(dev, DRIVER_MODESET))
4808 return;
4809
4810 ret = i915_gem_idle(dev);
4811 if (ret)
4812 DRM_ERROR("failed to idle hardware: %d\n", ret);
4813}
4814
4815void
4816i915_gem_load(struct drm_device *dev)
4817{
4818 int i;
4819 drm_i915_private_t *dev_priv = dev->dev_private;
4820
4821 spin_lock_init(&dev_priv->mm.active_list_lock);
4822 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4823 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4824 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4825 INIT_LIST_HEAD(&dev_priv->mm.request_list);
4826 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4827 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4828 i915_gem_retire_work_handler);
4829 dev_priv->mm.next_gem_seqno = 1;
4830
4831 spin_lock(&shrink_list_lock);
4832 list_add(&dev_priv->mm.shrink_list, &shrink_list);
4833 spin_unlock(&shrink_list_lock);
4834
4835 /* Old X drivers will take 0-2 for front, back, depth buffers */
4836 dev_priv->fence_reg_start = 3;
4837
4838 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4839 dev_priv->num_fence_regs = 16;
4840 else
4841 dev_priv->num_fence_regs = 8;
4842
4843 /* Initialize fence registers to zero */
4844 if (IS_I965G(dev)) {
4845 for (i = 0; i < 16; i++)
4846 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4847 } else {
4848 for (i = 0; i < 8; i++)
4849 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4850 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4851 for (i = 0; i < 8; i++)
4852 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4853 }
4854 i915_gem_detect_bit_6_swizzle(dev);
4855 init_waitqueue_head(&dev_priv->pending_flip_queue);
4856}
4857
4858/*
4859 * Create a physically contiguous memory object for this object
4860 * e.g. for cursor + overlay regs
4861 */
4862int i915_gem_init_phys_object(struct drm_device *dev,
4863 int id, int size)
4864{
4865 drm_i915_private_t *dev_priv = dev->dev_private;
4866 struct drm_i915_gem_phys_object *phys_obj;
4867 int ret;
4868
4869 if (dev_priv->mm.phys_objs[id - 1] || !size)
4870 return 0;
4871
4872 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4873 if (!phys_obj)
4874 return -ENOMEM;
4875
4876 phys_obj->id = id;
4877
4878 phys_obj->handle = drm_pci_alloc(dev, size, 0);
4879 if (!phys_obj->handle) {
4880 ret = -ENOMEM;
4881 goto kfree_obj;
4882 }
4883#ifdef CONFIG_X86
4884 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4885#endif
4886
4887 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4888
4889 return 0;
4890kfree_obj:
4891 kfree(phys_obj);
4892 return ret;
4893}
4894
4895void i915_gem_free_phys_object(struct drm_device *dev, int id)
4896{
4897 drm_i915_private_t *dev_priv = dev->dev_private;
4898 struct drm_i915_gem_phys_object *phys_obj;
4899
4900 if (!dev_priv->mm.phys_objs[id - 1])
4901 return;
4902
4903 phys_obj = dev_priv->mm.phys_objs[id - 1];
4904 if (phys_obj->cur_obj) {
4905 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4906 }
4907
4908#ifdef CONFIG_X86
4909 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4910#endif
4911 drm_pci_free(dev, phys_obj->handle);
4912 kfree(phys_obj);
4913 dev_priv->mm.phys_objs[id - 1] = NULL;
4914}
4915
4916void i915_gem_free_all_phys_object(struct drm_device *dev)
4917{
4918 int i;
4919
4920 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4921 i915_gem_free_phys_object(dev, i);
4922}
4923
4924void i915_gem_detach_phys_object(struct drm_device *dev,
4925 struct drm_gem_object *obj)
4926{
4927 struct drm_i915_gem_object *obj_priv;
4928 int i;
4929 int ret;
4930 int page_count;
4931
4932 obj_priv = obj->driver_private;
4933 if (!obj_priv->phys_obj)
4934 return;
4935
4936 ret = i915_gem_object_get_pages(obj, 0);
4937 if (ret)
4938 goto out;
4939
4940 page_count = obj->size / PAGE_SIZE;
4941
4942 for (i = 0; i < page_count; i++) {
4943 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
4944 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4945
4946 memcpy(dst, src, PAGE_SIZE);
4947 kunmap_atomic(dst, KM_USER0);
4948 }
4949 drm_clflush_pages(obj_priv->pages, page_count);
4950 drm_agp_chipset_flush(dev);
4951
4952 i915_gem_object_put_pages(obj);
4953out:
4954 obj_priv->phys_obj->cur_obj = NULL;
4955 obj_priv->phys_obj = NULL;
4956}
4957
4958int
4959i915_gem_attach_phys_object(struct drm_device *dev,
4960 struct drm_gem_object *obj, int id)
4961{
4962 drm_i915_private_t *dev_priv = dev->dev_private;
4963 struct drm_i915_gem_object *obj_priv;
4964 int ret = 0;
4965 int page_count;
4966 int i;
4967
4968 if (id > I915_MAX_PHYS_OBJECT)
4969 return -EINVAL;
4970
4971 obj_priv = obj->driver_private;
4972
4973 if (obj_priv->phys_obj) {
4974 if (obj_priv->phys_obj->id == id)
4975 return 0;
4976 i915_gem_detach_phys_object(dev, obj);
4977 }
4978
4979
4980 /* create a new object */
4981 if (!dev_priv->mm.phys_objs[id - 1]) {
4982 ret = i915_gem_init_phys_object(dev, id,
4983 obj->size);
4984 if (ret) {
4985 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
4986 goto out;
4987 }
4988 }
4989
4990 /* bind to the object */
4991 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4992 obj_priv->phys_obj->cur_obj = obj;
4993
4994 ret = i915_gem_object_get_pages(obj, 0);
4995 if (ret) {
4996 DRM_ERROR("failed to get page list\n");
4997 goto out;
4998 }
4999
5000 page_count = obj->size / PAGE_SIZE;
5001
5002 for (i = 0; i < page_count; i++) {
5003 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
5004 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
5005
5006 memcpy(dst, src, PAGE_SIZE);
5007 kunmap_atomic(src, KM_USER0);
5008 }
5009
5010 i915_gem_object_put_pages(obj);
5011
5012 return 0;
5013out:
5014 return ret;
5015}
5016
5017static int
5018i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
5019 struct drm_i915_gem_pwrite *args,
5020 struct drm_file *file_priv)
5021{
5022 struct drm_i915_gem_object *obj_priv = obj->driver_private;
5023 void *obj_addr;
5024 int ret;
5025 char __user *user_data;
5026
5027 user_data = (char __user *) (uintptr_t) args->data_ptr;
5028 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
5029
5030 DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
5031 ret = copy_from_user(obj_addr, user_data, args->size);
5032 if (ret)
5033 return -EFAULT;
5034
5035 drm_agp_chipset_flush(dev);
5036 return 0;
5037}
5038
5039void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
5040{
5041 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
5042
5043 /* Clean up our request list when the client is going away, so that
5044 * later retire_requests won't dereference our soon-to-be-gone
5045 * file_priv.
5046 */
5047 mutex_lock(&dev->struct_mutex);
5048 while (!list_empty(&i915_file_priv->mm.request_list))
5049 list_del_init(i915_file_priv->mm.request_list.next);
5050 mutex_unlock(&dev->struct_mutex);
5051}
5052
5053static int
5054i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
5055{
5056 drm_i915_private_t *dev_priv, *next_dev;
5057 struct drm_i915_gem_object *obj_priv, *next_obj;
5058 int cnt = 0;
5059 int would_deadlock = 1;
5060
5061 /* "fast-path" to count number of available objects */
5062 if (nr_to_scan == 0) {
5063 spin_lock(&shrink_list_lock);
5064 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
5065 struct drm_device *dev = dev_priv->dev;
5066
5067 if (mutex_trylock(&dev->struct_mutex)) {
5068 list_for_each_entry(obj_priv,
5069 &dev_priv->mm.inactive_list,
5070 list)
5071 cnt++;
5072 mutex_unlock(&dev->struct_mutex);
5073 }
5074 }
5075 spin_unlock(&shrink_list_lock);
5076
5077 return (cnt / 100) * sysctl_vfs_cache_pressure;
5078 }
5079
5080 spin_lock(&shrink_list_lock);
5081
5082 /* first scan for clean buffers */
5083 list_for_each_entry_safe(dev_priv, next_dev,
5084 &shrink_list, mm.shrink_list) {
5085 struct drm_device *dev = dev_priv->dev;
5086
5087 if (!mutex_trylock(&dev->struct_mutex))
5088 continue;
5089
5090 spin_unlock(&shrink_list_lock);
5091
5092 i915_gem_retire_requests(dev);
5093
5094 list_for_each_entry_safe(obj_priv, next_obj,
5095 &dev_priv->mm.inactive_list,
5096 list) {
5097 if (i915_gem_object_is_purgeable(obj_priv)) {
5098 i915_gem_object_unbind(obj_priv->obj);
5099 if (--nr_to_scan <= 0)
5100 break;
5101 }
5102 }
5103
5104 spin_lock(&shrink_list_lock);
5105 mutex_unlock(&dev->struct_mutex);
5106
5107 would_deadlock = 0;
5108
5109 if (nr_to_scan <= 0)
5110 break;
5111 }
5112
5113 /* second pass, evict/count anything still on the inactive list */
5114 list_for_each_entry_safe(dev_priv, next_dev,
5115 &shrink_list, mm.shrink_list) {
5116 struct drm_device *dev = dev_priv->dev;
5117
5118 if (!mutex_trylock(&dev->struct_mutex))
5119 continue;
5120
5121 spin_unlock(&shrink_list_lock);
5122
5123 list_for_each_entry_safe(obj_priv, next_obj,
5124 &dev_priv->mm.inactive_list,
5125 list) {
5126 if (nr_to_scan > 0) {
5127 i915_gem_object_unbind(obj_priv->obj);
5128 nr_to_scan--;
5129 } else
5130 cnt++;
5131 }
5132
5133 spin_lock(&shrink_list_lock);
5134 mutex_unlock(&dev->struct_mutex);
5135
5136 would_deadlock = 0;
5137 }
5138
5139 spin_unlock(&shrink_list_lock);
5140
5141 if (would_deadlock)
5142 return -1;
5143 else if (cnt > 0)
5144 return (cnt / 100) * sysctl_vfs_cache_pressure;
5145 else
5146 return 0;
5147}
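/* Editor's note: the return convention above follows the old shrinker API.
 * Returning -1 tells the VM that progress was impossible without risking
 * deadlock (struct_mutex could not be taken anywhere); otherwise the object
 * count is scaled by sysctl_vfs_cache_pressure, the same weighting the
 * dentry and inode cache shrinkers use, so GEM buffers age at a comparable
 * rate under memory pressure.
 */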
5148
5149static struct shrinker shrinker = {
5150 .shrink = i915_gem_shrink,
5151 .seeks = DEFAULT_SEEKS,
5152};
5153
5154__init void
5155i915_gem_shrinker_init(void)
5156{
5157 register_shrinker(&shrinker);
5158}
5159
5160__exit void
5161i915_gem_shrinker_exit(void)
5162{
5163 unregister_shrinker(&shrinker);
5164}