drivers/gpu/drm/i915/i915_gem.c
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
32#include <linux/swap.h>
33#include <linux/pci.h>
34
35#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
36
37static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
38static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
39static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
40static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
41 int write);
42static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
43 uint64_t offset,
44 uint64_t size);
45static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
46static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
47static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
48 unsigned alignment);
49static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
50static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
51static int i915_gem_evict_something(struct drm_device *dev);
52static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
53 struct drm_i915_gem_pwrite *args,
54 struct drm_file *file_priv);
55
56int i915_gem_do_init(struct drm_device *dev, unsigned long start,
57 unsigned long end)
58{
59 drm_i915_private_t *dev_priv = dev->dev_private;
60
61 if (start >= end ||
62 (start & (PAGE_SIZE - 1)) != 0 ||
63 (end & (PAGE_SIZE - 1)) != 0) {
64 return -EINVAL;
65 }
66
67 drm_mm_init(&dev_priv->mm.gtt_space, start,
68 end - start);
69
70 dev->gtt_total = (uint32_t) (end - start);
71
72 return 0;
73}
74
75int
76i915_gem_init_ioctl(struct drm_device *dev, void *data,
77 struct drm_file *file_priv)
78{
79 struct drm_i915_gem_init *args = data;
80 int ret;
81
82 mutex_lock(&dev->struct_mutex);
83 ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
84 mutex_unlock(&dev->struct_mutex);
85
86 return ret;
87}
88
89int
90i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
91 struct drm_file *file_priv)
92{
93 struct drm_i915_gem_get_aperture *args = data;
94
95 if (!(dev->driver->driver_features & DRIVER_GEM))
96 return -ENODEV;
97
98 args->aper_size = dev->gtt_total;
99 args->aper_available_size = (args->aper_size -
100 atomic_read(&dev->pin_memory));
101
102 return 0;
103}
104
105
106/**
107 * Creates a new mm object and returns a handle to it.
108 */
109int
110i915_gem_create_ioctl(struct drm_device *dev, void *data,
111 struct drm_file *file_priv)
112{
113 struct drm_i915_gem_create *args = data;
114 struct drm_gem_object *obj;
115 int handle, ret;
116
117 args->size = roundup(args->size, PAGE_SIZE);
118
119 /* Allocate the new object */
120 obj = drm_gem_object_alloc(dev, args->size);
121 if (obj == NULL)
122 return -ENOMEM;
123
124 ret = drm_gem_handle_create(file_priv, obj, &handle);
125 mutex_lock(&dev->struct_mutex);
126 drm_gem_object_handle_unreference(obj);
127 mutex_unlock(&dev->struct_mutex);
128
129 if (ret)
130 return ret;
131
132 args->handle = handle;
133
134 return 0;
135}
136
137static inline int
138fast_shmem_read(struct page **pages,
139 loff_t page_base, int page_offset,
140 char __user *data,
141 int length)
142{
143 char __iomem *vaddr;
144 int unwritten;
145
146 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
147 if (vaddr == NULL)
148 return -ENOMEM;
149 unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
150 kunmap_atomic(vaddr, KM_USER0);
151
152 if (unwritten)
153 return -EFAULT;
154
155 return 0;
156}
157
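/* Note: the copy above runs under kmap_atomic(), so it must not sleep or
 * fault; __copy_to_user_inatomic() instead reports any fault as a non-zero
 * return value, which this helper turns into -EFAULT so the caller can fall
 * back to the slow pread path. */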
158static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
159{
160 drm_i915_private_t *dev_priv = obj->dev->dev_private;
161 struct drm_i915_gem_object *obj_priv = obj->driver_private;
162
163 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
164 obj_priv->tiling_mode != I915_TILING_NONE;
165}
166
167static inline int
168slow_shmem_copy(struct page *dst_page,
169 int dst_offset,
170 struct page *src_page,
171 int src_offset,
172 int length)
173{
174 char *dst_vaddr, *src_vaddr;
175
176 dst_vaddr = kmap_atomic(dst_page, KM_USER0);
177 if (dst_vaddr == NULL)
178 return -ENOMEM;
179
180 src_vaddr = kmap_atomic(src_page, KM_USER1);
181 if (src_vaddr == NULL) {
182 kunmap_atomic(dst_vaddr, KM_USER0);
183 return -ENOMEM;
184 }
185
186 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
187
188 kunmap_atomic(src_vaddr, KM_USER1);
189 kunmap_atomic(dst_vaddr, KM_USER0);
190
191 return 0;
192}
193
194static inline int
195slow_shmem_bit17_copy(struct page *gpu_page,
196 int gpu_offset,
197 struct page *cpu_page,
198 int cpu_offset,
199 int length,
200 int is_read)
201{
202 char *gpu_vaddr, *cpu_vaddr;
203
204 /* Use the unswizzled path if this page isn't affected. */
205 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
206 if (is_read)
207 return slow_shmem_copy(cpu_page, cpu_offset,
208 gpu_page, gpu_offset, length);
209 else
210 return slow_shmem_copy(gpu_page, gpu_offset,
211 cpu_page, cpu_offset, length);
212 }
213
214 gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
215 if (gpu_vaddr == NULL)
216 return -ENOMEM;
217
218 cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
219 if (cpu_vaddr == NULL) {
220 kunmap_atomic(gpu_vaddr, KM_USER0);
221 return -ENOMEM;
222 }
223
224 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
225 * XORing with the other bits (A9 for Y, A9 and A10 for X)
226 */
227 while (length > 0) {
228 int cacheline_end = ALIGN(gpu_offset + 1, 64);
229 int this_length = min(cacheline_end - gpu_offset, length);
230 int swizzled_gpu_offset = gpu_offset ^ 64;
231
232 if (is_read) {
233 memcpy(cpu_vaddr + cpu_offset,
234 gpu_vaddr + swizzled_gpu_offset,
235 this_length);
236 } else {
237 memcpy(gpu_vaddr + swizzled_gpu_offset,
238 cpu_vaddr + cpu_offset,
239 this_length);
240 }
241 cpu_offset += this_length;
242 gpu_offset += this_length;
243 length -= this_length;
244 }
245
246 kunmap_atomic(cpu_vaddr, KM_USER1);
247 kunmap_atomic(gpu_vaddr, KM_USER0);
248
249 return 0;
250}
251
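/* Illustration of the swizzle above: on a page whose physical address has
 * bit 17 set, neighbouring 64-byte cache lines are swapped, so the data the
 * GPU sees at offset 0x40 is stored at CPU offset 0x00 (gpu_offset ^ 64) and
 * vice versa; pages with bit 17 clear are copied straight through. */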
252/**
253 * This is the fast shmem pread path, which attempts to copy_to_user directly
254 * from the backing pages of the object into the user's address space. On a
255 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
256 */
257static int
258i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
259 struct drm_i915_gem_pread *args,
260 struct drm_file *file_priv)
261{
262 struct drm_i915_gem_object *obj_priv = obj->driver_private;
263 ssize_t remain;
264 loff_t offset, page_base;
265 char __user *user_data;
266 int page_offset, page_length;
267 int ret;
268
269 user_data = (char __user *) (uintptr_t) args->data_ptr;
270 remain = args->size;
271
272 mutex_lock(&dev->struct_mutex);
273
274 ret = i915_gem_object_get_pages(obj);
275 if (ret != 0)
276 goto fail_unlock;
277
278 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
279 args->size);
280 if (ret != 0)
281 goto fail_put_pages;
282
283 obj_priv = obj->driver_private;
284 offset = args->offset;
285
286 while (remain > 0) {
287 /* Operation in this page
288 *
289 * page_base = page offset within aperture
290 * page_offset = offset within page
291 * page_length = bytes to copy for this page
292 */
293 page_base = (offset & ~(PAGE_SIZE-1));
294 page_offset = offset & (PAGE_SIZE-1);
295 page_length = remain;
296 if ((page_offset + remain) > PAGE_SIZE)
297 page_length = PAGE_SIZE - page_offset;
298
299 ret = fast_shmem_read(obj_priv->pages,
300 page_base, page_offset,
301 user_data, page_length);
302 if (ret)
303 goto fail_put_pages;
304
305 remain -= page_length;
306 user_data += page_length;
307 offset += page_length;
308 }
309
310fail_put_pages:
311 i915_gem_object_put_pages(obj);
312fail_unlock:
313 mutex_unlock(&dev->struct_mutex);
314
315 return ret;
316}
317
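/* Example of the per-page arithmetic above (assuming PAGE_SIZE == 4096):
 * with offset == 0x1234 and remain == 0x2000, the first iteration uses
 * page_base == 0x1000, page_offset == 0x234 and page_length == 0xdcc, so a
 * single copy never crosses a page boundary. */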
318/**
319 * This is the fallback shmem pread path, which pins the destination pages in
320 * user space with get_user_pages() ahead of time, so we can copy out of the
321 * object's backing pages while holding the struct_mutex and not take page
322 * faults on the user buffer.
323 */
324static int
325i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
326 struct drm_i915_gem_pread *args,
327 struct drm_file *file_priv)
328{
329 struct drm_i915_gem_object *obj_priv = obj->driver_private;
330 struct mm_struct *mm = current->mm;
331 struct page **user_pages;
332 ssize_t remain;
333 loff_t offset, pinned_pages, i;
334 loff_t first_data_page, last_data_page, num_pages;
335 int shmem_page_index, shmem_page_offset;
336 int data_page_index, data_page_offset;
337 int page_length;
338 int ret;
339 uint64_t data_ptr = args->data_ptr;
340 int do_bit17_swizzling;
341
342 remain = args->size;
343
344 /* Pin the user pages containing the data. We can't fault while
345 * holding the struct mutex, yet we want to hold it while
346 * dereferencing the user data.
347 */
348 first_data_page = data_ptr / PAGE_SIZE;
349 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
350 num_pages = last_data_page - first_data_page + 1;
351
352 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
353 if (user_pages == NULL)
354 return -ENOMEM;
355
356 down_read(&mm->mmap_sem);
357 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
358 num_pages, 1, 0, user_pages, NULL);
359 up_read(&mm->mmap_sem);
360 if (pinned_pages < num_pages) {
361 ret = -EFAULT;
362 goto fail_put_user_pages;
363 }
364
365 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
366
367 mutex_lock(&dev->struct_mutex);
368
369 ret = i915_gem_object_get_pages(obj);
370 if (ret != 0)
371 goto fail_unlock;
372
373 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
374 args->size);
375 if (ret != 0)
376 goto fail_put_pages;
377
378 obj_priv = obj->driver_private;
379 offset = args->offset;
380
381 while (remain > 0) {
382 /* Operation in this page
383 *
384 * shmem_page_index = page number within shmem file
385 * shmem_page_offset = offset within page in shmem file
386 * data_page_index = page number in get_user_pages return
387 * data_page_offset = offset within data_page_index page.
388 * page_length = bytes to copy for this page
389 */
390 shmem_page_index = offset / PAGE_SIZE;
391 shmem_page_offset = offset & ~PAGE_MASK;
392 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
393 data_page_offset = data_ptr & ~PAGE_MASK;
394
395 page_length = remain;
396 if ((shmem_page_offset + page_length) > PAGE_SIZE)
397 page_length = PAGE_SIZE - shmem_page_offset;
398 if ((data_page_offset + page_length) > PAGE_SIZE)
399 page_length = PAGE_SIZE - data_page_offset;
400
401 if (do_bit17_swizzling) {
402 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
403 shmem_page_offset,
404 user_pages[data_page_index],
405 data_page_offset,
406 page_length,
407 1);
408 } else {
409 ret = slow_shmem_copy(user_pages[data_page_index],
410 data_page_offset,
411 obj_priv->pages[shmem_page_index],
412 shmem_page_offset,
413 page_length);
414 }
415 if (ret)
416 goto fail_put_pages;
417
418 remain -= page_length;
419 data_ptr += page_length;
420 offset += page_length;
421 }
422
423fail_put_pages:
424 i915_gem_object_put_pages(obj);
425fail_unlock:
426 mutex_unlock(&dev->struct_mutex);
427fail_put_user_pages:
428 for (i = 0; i < pinned_pages; i++) {
429 SetPageDirty(user_pages[i]);
430 page_cache_release(user_pages[i]);
431 }
432 kfree(user_pages);
433
434 return ret;
435}
436
437/**
438 * Reads data from the object referenced by handle.
439 *
440 * On error, the contents of *data are undefined.
441 */
442int
443i915_gem_pread_ioctl(struct drm_device *dev, void *data,
444 struct drm_file *file_priv)
445{
446 struct drm_i915_gem_pread *args = data;
447 struct drm_gem_object *obj;
448 struct drm_i915_gem_object *obj_priv;
449 int ret;
450
451 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
452 if (obj == NULL)
453 return -EBADF;
454 obj_priv = obj->driver_private;
455
456 /* Bounds check source.
457 *
458 * XXX: This could use review for overflow issues...
459 */
460 if (args->offset > obj->size || args->size > obj->size ||
461 args->offset + args->size > obj->size) {
462 drm_gem_object_unreference(obj);
463 return -EINVAL;
464 }
465
466 if (i915_gem_object_needs_bit17_swizzle(obj)) {
467 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
468 } else {
469 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
470 if (ret != 0)
471 ret = i915_gem_shmem_pread_slow(dev, obj, args,
472 file_priv);
473 }
474
475 drm_gem_object_unreference(obj);
476
477 return ret;
478}
479
480/* This is the fast write path which cannot handle
481 * page faults in the source data
482 */
483
484static inline int
485fast_user_write(struct io_mapping *mapping,
486 loff_t page_base, int page_offset,
487 char __user *user_data,
488 int length)
489{
490 char *vaddr_atomic;
491 unsigned long unwritten;
492
493 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
494 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
495 user_data, length);
496 io_mapping_unmap_atomic(vaddr_atomic);
497 if (unwritten)
498 return -EFAULT;
499 return 0;
500}
501
502/* Here's the write path which can sleep for
503 * page faults
504 */
505
506static inline int
507slow_kernel_write(struct io_mapping *mapping,
508 loff_t gtt_base, int gtt_offset,
509 struct page *user_page, int user_offset,
510 int length)
511{
512 char *src_vaddr, *dst_vaddr;
513 unsigned long unwritten;
514
515 dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
516 src_vaddr = kmap_atomic(user_page, KM_USER1);
517 unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
518 src_vaddr + user_offset,
519 length);
520 kunmap_atomic(src_vaddr, KM_USER1);
521 io_mapping_unmap_atomic(dst_vaddr);
522 if (unwritten)
523 return -EFAULT;
524 return 0;
525}
526
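/* fast_user_write() copies through a write-combined atomic mapping of the
 * aperture and must not fault, so a fault in the user buffer simply yields
 * -EFAULT; slow_kernel_write() is the counterpart for the slow path, where
 * the source is an already-pinned, kernel-mapped user page and cannot
 * fault. */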
527static inline int
528fast_shmem_write(struct page **pages,
529 loff_t page_base, int page_offset,
530 char __user *data,
531 int length)
532{
533 char __iomem *vaddr;
534 unsigned long unwritten;
535
536 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
537 if (vaddr == NULL)
538 return -ENOMEM;
539 unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
540 kunmap_atomic(vaddr, KM_USER0);
541
542 if (unwritten)
543 return -EFAULT;
544 return 0;
545}
546
547/**
548 * This is the fast pwrite path, where we copy the data directly from the
549 * user into the GTT, uncached.
550 */
551static int
552i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
553 struct drm_i915_gem_pwrite *args,
554 struct drm_file *file_priv)
555{
556 struct drm_i915_gem_object *obj_priv = obj->driver_private;
557 drm_i915_private_t *dev_priv = dev->dev_private;
558 ssize_t remain;
559 loff_t offset, page_base;
560 char __user *user_data;
561 int page_offset, page_length;
562 int ret;
563
564 user_data = (char __user *) (uintptr_t) args->data_ptr;
565 remain = args->size;
566 if (!access_ok(VERIFY_READ, user_data, remain))
567 return -EFAULT;
568
569
570 mutex_lock(&dev->struct_mutex);
571 ret = i915_gem_object_pin(obj, 0);
572 if (ret) {
573 mutex_unlock(&dev->struct_mutex);
574 return ret;
575 }
576 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
577 if (ret)
578 goto fail;
579
580 obj_priv = obj->driver_private;
581 offset = obj_priv->gtt_offset + args->offset;
582
583 while (remain > 0) {
584 /* Operation in this page
585 *
586 * page_base = page offset within aperture
587 * page_offset = offset within page
588 * page_length = bytes to copy for this page
589 */
590 page_base = (offset & ~(PAGE_SIZE-1));
591 page_offset = offset & (PAGE_SIZE-1);
592 page_length = remain;
593 if ((page_offset + remain) > PAGE_SIZE)
594 page_length = PAGE_SIZE - page_offset;
595
596 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
597 page_offset, user_data, page_length);
598
599 /* If we get a fault while copying data, then (presumably) our
600 * source page isn't available. Return the error and we'll
601 * retry in the slow path.
602 */
603 if (ret)
604 goto fail;
605
606 remain -= page_length;
607 user_data += page_length;
608 offset += page_length;
609 }
610
611fail:
612 i915_gem_object_unpin(obj);
613 mutex_unlock(&dev->struct_mutex);
614
615 return ret;
616}
617
618/**
619 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
620 * the memory and maps it using kmap_atomic for copying.
621 *
622 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
623 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
624 */
625static int
626i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
627 struct drm_i915_gem_pwrite *args,
628 struct drm_file *file_priv)
629{
630 struct drm_i915_gem_object *obj_priv = obj->driver_private;
631 drm_i915_private_t *dev_priv = dev->dev_private;
632 ssize_t remain;
633 loff_t gtt_page_base, offset;
634 loff_t first_data_page, last_data_page, num_pages;
635 loff_t pinned_pages, i;
636 struct page **user_pages;
637 struct mm_struct *mm = current->mm;
638 int gtt_page_offset, data_page_offset, data_page_index, page_length;
639 int ret;
640 uint64_t data_ptr = args->data_ptr;
641
642 remain = args->size;
643
644 /* Pin the user pages containing the data. We can't fault while
645 * holding the struct mutex, and all of the pwrite implementations
646 * want to hold it while dereferencing the user data.
647 */
648 first_data_page = data_ptr / PAGE_SIZE;
649 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
650 num_pages = last_data_page - first_data_page + 1;
651
652 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
653 if (user_pages == NULL)
654 return -ENOMEM;
655
656 down_read(&mm->mmap_sem);
657 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
658 num_pages, 0, 0, user_pages, NULL);
659 up_read(&mm->mmap_sem);
660 if (pinned_pages < num_pages) {
661 ret = -EFAULT;
662 goto out_unpin_pages;
663 }
664
665 mutex_lock(&dev->struct_mutex);
666 ret = i915_gem_object_pin(obj, 0);
667 if (ret)
668 goto out_unlock;
669
670 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
671 if (ret)
672 goto out_unpin_object;
673
674 obj_priv = obj->driver_private;
675 offset = obj_priv->gtt_offset + args->offset;
676
677 while (remain > 0) {
678 /* Operation in this page
679 *
680 * gtt_page_base = page offset within aperture
681 * gtt_page_offset = offset within page in aperture
682 * data_page_index = page number in get_user_pages return
683 * data_page_offset = offset within data_page_index page.
684 * page_length = bytes to copy for this page
685 */
686 gtt_page_base = offset & PAGE_MASK;
687 gtt_page_offset = offset & ~PAGE_MASK;
688 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
689 data_page_offset = data_ptr & ~PAGE_MASK;
690
691 page_length = remain;
692 if ((gtt_page_offset + page_length) > PAGE_SIZE)
693 page_length = PAGE_SIZE - gtt_page_offset;
694 if ((data_page_offset + page_length) > PAGE_SIZE)
695 page_length = PAGE_SIZE - data_page_offset;
696
697 ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
698 gtt_page_base, gtt_page_offset,
699 user_pages[data_page_index],
700 data_page_offset,
701 page_length);
702
703 /* If we get a fault while copying data, then (presumably) our
704 * source page isn't available. Return the error and we'll
705 * retry in the slow path.
706 */
707 if (ret)
708 goto out_unpin_object;
709
710 remain -= page_length;
711 offset += page_length;
712 data_ptr += page_length;
713 }
714
715out_unpin_object:
716 i915_gem_object_unpin(obj);
717out_unlock:
718 mutex_unlock(&dev->struct_mutex);
719out_unpin_pages:
720 for (i = 0; i < pinned_pages; i++)
721 page_cache_release(user_pages[i]);
722 kfree(user_pages);
723
724 return ret;
725}
726
727/**
728 * This is the fast shmem pwrite path, which attempts to directly
729 * copy_from_user into the kmapped pages backing the object.
730 */
731static int
732i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
733 struct drm_i915_gem_pwrite *args,
734 struct drm_file *file_priv)
735{
736 struct drm_i915_gem_object *obj_priv = obj->driver_private;
737 ssize_t remain;
738 loff_t offset, page_base;
739 char __user *user_data;
740 int page_offset, page_length;
741 int ret;
742
743 user_data = (char __user *) (uintptr_t) args->data_ptr;
744 remain = args->size;
745
746 mutex_lock(&dev->struct_mutex);
747
748 ret = i915_gem_object_get_pages(obj);
749 if (ret != 0)
750 goto fail_unlock;
751
752 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
753 if (ret != 0)
754 goto fail_put_pages;
755
756 obj_priv = obj->driver_private;
757 offset = args->offset;
758 obj_priv->dirty = 1;
759
760 while (remain > 0) {
761 /* Operation in this page
762 *
763 * page_base = page offset within aperture
764 * page_offset = offset within page
765 * page_length = bytes to copy for this page
766 */
767 page_base = (offset & ~(PAGE_SIZE-1));
768 page_offset = offset & (PAGE_SIZE-1);
769 page_length = remain;
770 if ((page_offset + remain) > PAGE_SIZE)
771 page_length = PAGE_SIZE - page_offset;
772
773 ret = fast_shmem_write(obj_priv->pages,
774 page_base, page_offset,
775 user_data, page_length);
776 if (ret)
777 goto fail_put_pages;
778
779 remain -= page_length;
780 user_data += page_length;
781 offset += page_length;
782 }
783
784fail_put_pages:
785 i915_gem_object_put_pages(obj);
786fail_unlock:
787 mutex_unlock(&dev->struct_mutex);
788
789 return ret;
790}
791
792/**
793 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
794 * the memory and maps it using kmap_atomic for copying.
795 *
796 * This avoids taking mmap_sem for faulting on the user's address while the
797 * struct_mutex is held.
798 */
799static int
800i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
801 struct drm_i915_gem_pwrite *args,
802 struct drm_file *file_priv)
803{
804 struct drm_i915_gem_object *obj_priv = obj->driver_private;
805 struct mm_struct *mm = current->mm;
806 struct page **user_pages;
807 ssize_t remain;
808 loff_t offset, pinned_pages, i;
809 loff_t first_data_page, last_data_page, num_pages;
810 int shmem_page_index, shmem_page_offset;
811 int data_page_index, data_page_offset;
812 int page_length;
813 int ret;
814 uint64_t data_ptr = args->data_ptr;
815 int do_bit17_swizzling;
816
817 remain = args->size;
818
819 /* Pin the user pages containing the data. We can't fault while
820 * holding the struct mutex, and all of the pwrite implementations
821 * want to hold it while dereferencing the user data.
822 */
823 first_data_page = data_ptr / PAGE_SIZE;
824 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
825 num_pages = last_data_page - first_data_page + 1;
826
827 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
828 if (user_pages == NULL)
829 return -ENOMEM;
830
831 down_read(&mm->mmap_sem);
832 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
833 num_pages, 0, 0, user_pages, NULL);
834 up_read(&mm->mmap_sem);
835 if (pinned_pages < num_pages) {
836 ret = -EFAULT;
837 goto fail_put_user_pages;
838 }
839
840 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
841
842 mutex_lock(&dev->struct_mutex);
843
844 ret = i915_gem_object_get_pages(obj);
845 if (ret != 0)
846 goto fail_unlock;
847
848 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
849 if (ret != 0)
850 goto fail_put_pages;
851
852 obj_priv = obj->driver_private;
853 offset = args->offset;
854 obj_priv->dirty = 1;
855
856 while (remain > 0) {
857 /* Operation in this page
858 *
859 * shmem_page_index = page number within shmem file
860 * shmem_page_offset = offset within page in shmem file
861 * data_page_index = page number in get_user_pages return
862 * data_page_offset = offset within data_page_index page.
863 * page_length = bytes to copy for this page
864 */
865 shmem_page_index = offset / PAGE_SIZE;
866 shmem_page_offset = offset & ~PAGE_MASK;
867 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
868 data_page_offset = data_ptr & ~PAGE_MASK;
869
870 page_length = remain;
871 if ((shmem_page_offset + page_length) > PAGE_SIZE)
872 page_length = PAGE_SIZE - shmem_page_offset;
873 if ((data_page_offset + page_length) > PAGE_SIZE)
874 page_length = PAGE_SIZE - data_page_offset;
875
876 if (do_bit17_swizzling) {
877 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
878 shmem_page_offset,
879 user_pages[data_page_index],
880 data_page_offset,
881 page_length,
882 0);
883 } else {
884 ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
885 shmem_page_offset,
886 user_pages[data_page_index],
887 data_page_offset,
888 page_length);
889 }
890 if (ret)
891 goto fail_put_pages;
892
893 remain -= page_length;
894 data_ptr += page_length;
895 offset += page_length;
896 }
897
898fail_put_pages:
899 i915_gem_object_put_pages(obj);
900fail_unlock:
901 mutex_unlock(&dev->struct_mutex);
902fail_put_user_pages:
903 for (i = 0; i < pinned_pages; i++)
904 page_cache_release(user_pages[i]);
905 kfree(user_pages);
906
907 return ret;
908}
909
910/**
911 * Writes data to the object referenced by handle.
912 *
913 * On error, the contents of the buffer that were to be modified are undefined.
914 */
915int
916i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
917 struct drm_file *file_priv)
918{
919 struct drm_i915_gem_pwrite *args = data;
920 struct drm_gem_object *obj;
921 struct drm_i915_gem_object *obj_priv;
922 int ret = 0;
923
924 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
925 if (obj == NULL)
926 return -EBADF;
927 obj_priv = obj->driver_private;
928
929 /* Bounds check destination.
930 *
931 * XXX: This could use review for overflow issues...
932 */
933 if (args->offset > obj->size || args->size > obj->size ||
934 args->offset + args->size > obj->size) {
935 drm_gem_object_unreference(obj);
936 return -EINVAL;
937 }
938
939 /* We can only do the GTT pwrite on untiled buffers, as otherwise
940 * it would end up going through the fenced access, and we'll get
941 * different detiling behavior between reading and writing.
942 * pread/pwrite currently are reading and writing from the CPU
943 * perspective, requiring manual detiling by the client.
944 */
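/* In short, the fallback ladder below is: objects backed by a phys object
 * use i915_gem_phys_pwrite(); untiled objects go through the GTT (fast
 * path, then the slow path on -EFAULT); bit-17-swizzled objects must take
 * the swizzling-aware shmem slow path; everything else tries the shmem
 * fast path first. */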
945 if (obj_priv->phys_obj)
946 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
947 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
948 dev->gtt_total != 0) {
949 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
950 if (ret == -EFAULT) {
951 ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
952 file_priv);
953 }
954 } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
955 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
956 } else {
957 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
958 if (ret == -EFAULT) {
959 ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
960 file_priv);
961 }
962 }
963
964#if WATCH_PWRITE
965 if (ret)
966 DRM_INFO("pwrite failed %d\n", ret);
967#endif
968
969 drm_gem_object_unreference(obj);
970
971 return ret;
972}
973
974/**
975 * Called when user space prepares to use an object with the CPU, either
976 * through the mmap ioctl's mapping or a GTT mapping.
977 */
978int
979i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
980 struct drm_file *file_priv)
981{
982 struct drm_i915_gem_set_domain *args = data;
983 struct drm_gem_object *obj;
984 uint32_t read_domains = args->read_domains;
985 uint32_t write_domain = args->write_domain;
986 int ret;
987
988 if (!(dev->driver->driver_features & DRIVER_GEM))
989 return -ENODEV;
990
991 /* Only handle setting domains to types used by the CPU. */
992 if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
993 return -EINVAL;
994
995 if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
996 return -EINVAL;
997
998 /* Having something in the write domain implies it's in the read
999 * domain, and only that read domain. Enforce that in the request.
1000 */
1001 if (write_domain != 0 && read_domains != write_domain)
1002 return -EINVAL;
1003
1004 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1005 if (obj == NULL)
1006 return -EBADF;
1007
1008 mutex_lock(&dev->struct_mutex);
1009#if WATCH_BUF
1010 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
1011 obj, obj->size, read_domains, write_domain);
1012#endif
1013 if (read_domains & I915_GEM_DOMAIN_GTT) {
1014 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1015
1016 /* Silently promote "you're not bound, there was nothing to do"
1017 * to success, since the client was just asking us to
1018 * make sure everything was done.
1019 */
1020 if (ret == -EINVAL)
1021 ret = 0;
1022 } else {
1023 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1024 }
1025
1026 drm_gem_object_unreference(obj);
1027 mutex_unlock(&dev->struct_mutex);
1028 return ret;
1029}
1030
1031/**
1032 * Called when user space has done writes to this buffer
1033 */
1034int
1035i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1036 struct drm_file *file_priv)
1037{
1038 struct drm_i915_gem_sw_finish *args = data;
1039 struct drm_gem_object *obj;
1040 struct drm_i915_gem_object *obj_priv;
1041 int ret = 0;
1042
1043 if (!(dev->driver->driver_features & DRIVER_GEM))
1044 return -ENODEV;
1045
1046 mutex_lock(&dev->struct_mutex);
1047 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1048 if (obj == NULL) {
1049 mutex_unlock(&dev->struct_mutex);
1050 return -EBADF;
1051 }
1052
1053#if WATCH_BUF
1054 DRM_INFO("%s: sw_finish %d (%p %d)\n",
1055 __func__, args->handle, obj, obj->size);
1056#endif
1057 obj_priv = obj->driver_private;
1058
1059 /* Pinned buffers may be scanout, so flush the cache */
1060 if (obj_priv->pin_count)
1061 i915_gem_object_flush_cpu_write_domain(obj);
1062
1063 drm_gem_object_unreference(obj);
1064 mutex_unlock(&dev->struct_mutex);
1065 return ret;
1066}
1067
1068/**
1069 * Maps the contents of an object, returning the address it is mapped
1070 * into.
1071 *
1072 * While the mapping holds a reference on the contents of the object, it doesn't
1073 * imply a ref on the object itself.
1074 */
1075int
1076i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1077 struct drm_file *file_priv)
1078{
1079 struct drm_i915_gem_mmap *args = data;
1080 struct drm_gem_object *obj;
1081 loff_t offset;
1082 unsigned long addr;
1083
1084 if (!(dev->driver->driver_features & DRIVER_GEM))
1085 return -ENODEV;
1086
1087 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1088 if (obj == NULL)
1089 return -EBADF;
1090
1091 offset = args->offset;
1092
1093 down_write(&current->mm->mmap_sem);
1094 addr = do_mmap(obj->filp, 0, args->size,
1095 PROT_READ | PROT_WRITE, MAP_SHARED,
1096 args->offset);
1097 up_write(&current->mm->mmap_sem);
1098 mutex_lock(&dev->struct_mutex);
1099 drm_gem_object_unreference(obj);
1100 mutex_unlock(&dev->struct_mutex);
1101 if (IS_ERR((void *)addr))
1102 return addr;
1103
1104 args->addr_ptr = (uint64_t) addr;
1105
1106 return 0;
1107}
1108
1109/**
1110 * i915_gem_fault - fault a page into the GTT
1111 * vma: VMA in question
1112 * vmf: fault info
1113 *
1114 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1115 * from userspace. The fault handler takes care of binding the object to
1116 * the GTT (if needed), allocating and programming a fence register (again,
1117 * only if needed based on whether the old reg is still valid or the object
1118 * is tiled) and inserting a new PTE into the faulting process.
1119 *
1120 * Note that the faulting process may involve evicting existing objects
1121 * from the GTT and/or fence registers to make room. So performance may
1122 * suffer if the GTT working set is large or there are few fence registers
1123 * left.
1124 */
1125int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1126{
1127 struct drm_gem_object *obj = vma->vm_private_data;
1128 struct drm_device *dev = obj->dev;
1129 struct drm_i915_private *dev_priv = dev->dev_private;
1130 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1131 pgoff_t page_offset;
1132 unsigned long pfn;
1133 int ret = 0;
1134 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1135
1136 /* We don't use vmf->pgoff since that has the fake offset */
1137 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1138 PAGE_SHIFT;
1139
1140 /* Now bind it into the GTT if needed */
1141 mutex_lock(&dev->struct_mutex);
1142 if (!obj_priv->gtt_space) {
1143 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
1144 if (ret) {
1145 mutex_unlock(&dev->struct_mutex);
1146 return VM_FAULT_SIGBUS;
1147 }
1148 list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
1149 }
1150
1151 /* Need a new fence register? */
1152 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
1153 obj_priv->tiling_mode != I915_TILING_NONE) {
1154 ret = i915_gem_object_get_fence_reg(obj, write);
1155 if (ret) {
1156 mutex_unlock(&dev->struct_mutex);
1157 return VM_FAULT_SIGBUS;
1158 }
1159 }
1160
1161 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1162 page_offset;
1163
1164 /* Finally, remap it using the new GTT offset */
1165 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1166
1167 mutex_unlock(&dev->struct_mutex);
1168
1169 switch (ret) {
1170 case -ENOMEM:
1171 case -EAGAIN:
1172 return VM_FAULT_OOM;
1173 case -EFAULT:
1174 case -EINVAL:
1175 return VM_FAULT_SIGBUS;
1176 default:
1177 return VM_FAULT_NOPAGE;
1178 }
1179}
1180
1181/**
1182 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1183 * @obj: obj in question
1184 *
1185 * GEM memory mapping works by handing back to userspace a fake mmap offset
1186 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1187 * up the object based on the offset and sets up the various memory mapping
1188 * structures.
1189 *
1190 * This routine allocates and attaches a fake offset for @obj.
1191 */
1192static int
1193i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1194{
1195 struct drm_device *dev = obj->dev;
1196 struct drm_gem_mm *mm = dev->mm_private;
1197 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1198 struct drm_map_list *list;
1199 struct drm_local_map *map;
1200 int ret = 0;
1201
1202 /* Set the object up for mmap'ing */
1203 list = &obj->map_list;
1204 list->map = drm_calloc(1, sizeof(struct drm_map_list),
1205 DRM_MEM_DRIVER);
1206 if (!list->map)
1207 return -ENOMEM;
1208
1209 map = list->map;
1210 map->type = _DRM_GEM;
1211 map->size = obj->size;
1212 map->handle = obj;
1213
1214 /* Get a DRM GEM mmap offset allocated... */
1215 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1216 obj->size / PAGE_SIZE, 0, 0);
1217 if (!list->file_offset_node) {
1218 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
1219 ret = -ENOMEM;
1220 goto out_free_list;
1221 }
1222
1223 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1224 obj->size / PAGE_SIZE, 0);
1225 if (!list->file_offset_node) {
1226 ret = -ENOMEM;
1227 goto out_free_list;
1228 }
1229
1230 list->hash.key = list->file_offset_node->start;
1231 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
1232 DRM_ERROR("failed to add to map hash\n");
1233 goto out_free_mm;
1234 }
1235
1236 /* By now we should be all set, any drm_mmap request on the offset
1237 * below will get to our mmap & fault handler */
1238 obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
1239
1240 return 0;
1241
1242out_free_mm:
1243 drm_mm_put_block(list->file_offset_node);
1244out_free_list:
1245 drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
1246
1247 return ret;
1248}
1249
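/* For illustration: if the offset manager hands back a block starting at
 * page 0x100, the fake offset stored above is 0x100 << PAGE_SHIFT ==
 * 0x100000; userspace passes that value as the mmap(2) offset on the DRM
 * fd, and drm_gem_mmap() looks the object back up before i915_gem_fault()
 * runs. */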
1250static void
1251i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1252{
1253 struct drm_device *dev = obj->dev;
1254 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1255 struct drm_gem_mm *mm = dev->mm_private;
1256 struct drm_map_list *list;
1257
1258 list = &obj->map_list;
1259 drm_ht_remove_item(&mm->offset_hash, &list->hash);
1260
1261 if (list->file_offset_node) {
1262 drm_mm_put_block(list->file_offset_node);
1263 list->file_offset_node = NULL;
1264 }
1265
1266 if (list->map) {
1267 drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
1268 list->map = NULL;
1269 }
1270
1271 obj_priv->mmap_offset = 0;
1272}
1273
1274/**
1275 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1276 * @obj: object to check
1277 *
1278 * Return the required GTT alignment for an object, taking into account
1279 * potential fence register mapping if needed.
1280 */
1281static uint32_t
1282i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1283{
1284 struct drm_device *dev = obj->dev;
1285 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1286 int start, i;
1287
1288 /*
1289 * Minimum alignment is 4k (GTT page size), but might be greater
1290 * if a fence register is needed for the object.
1291 */
1292 if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
1293 return 4096;
1294
1295 /*
1296 * Previous chips need to be aligned to the size of the smallest
1297 * fence register that can contain the object.
1298 */
1299 if (IS_I9XX(dev))
1300 start = 1024*1024;
1301 else
1302 start = 512*1024;
1303
1304 for (i = start; i < obj->size; i <<= 1)
1305 ;
1306
1307 return i;
1308}
1309
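/* Example: a 3MB tiled object on a pre-965 i9xx part starts the loop at
 * 1MB and doubles until the fence size covers the object, returning 4MB;
 * on 965+ or for untiled objects the function short-circuits to 4KB. */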
1310/**
1311 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1312 * @dev: DRM device
1313 * @data: GTT mapping ioctl data
1314 * @file_priv: GEM object info
1315 *
1316 * Simply returns the fake offset to userspace so it can mmap it.
1317 * The mmap call will end up in drm_gem_mmap(), which will set things
1318 * up so we can get faults in the handler above.
1319 *
1320 * The fault handler will take care of binding the object into the GTT
1321 * (since it may have been evicted to make room for something), allocating
1322 * a fence register, and mapping the appropriate aperture address into
1323 * userspace.
1324 */
1325int
1326i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1327 struct drm_file *file_priv)
1328{
1329 struct drm_i915_gem_mmap_gtt *args = data;
1330 struct drm_i915_private *dev_priv = dev->dev_private;
1331 struct drm_gem_object *obj;
1332 struct drm_i915_gem_object *obj_priv;
1333 int ret;
1334
1335 if (!(dev->driver->driver_features & DRIVER_GEM))
1336 return -ENODEV;
1337
1338 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1339 if (obj == NULL)
1340 return -EBADF;
1341
1342 mutex_lock(&dev->struct_mutex);
1343
1344 obj_priv = obj->driver_private;
1345
1346 if (!obj_priv->mmap_offset) {
1347 ret = i915_gem_create_mmap_offset(obj);
1348 if (ret) {
1349 drm_gem_object_unreference(obj);
1350 mutex_unlock(&dev->struct_mutex);
1351 return ret;
1352 }
1353 }
1354
1355 args->offset = obj_priv->mmap_offset;
1356
1357 obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
1358
1359 /* Make sure the alignment is correct for fence regs etc */
1360 if (obj_priv->agp_mem &&
1361 (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
1362 drm_gem_object_unreference(obj);
1363 mutex_unlock(&dev->struct_mutex);
1364 return -EINVAL;
1365 }
1366
1367 /*
1368 * Pull it into the GTT so that we have a page list (makes the
1369 * initial fault faster and any subsequent flushing possible).
1370 */
1371 if (!obj_priv->agp_mem) {
1372 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
1373 if (ret) {
1374 drm_gem_object_unreference(obj);
1375 mutex_unlock(&dev->struct_mutex);
1376 return ret;
1377 }
1378 list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
1379 }
1380
1381 drm_gem_object_unreference(obj);
1382 mutex_unlock(&dev->struct_mutex);
1383
1384 return 0;
1385}
1386
1387void
1388i915_gem_object_put_pages(struct drm_gem_object *obj)
1389{
1390 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1391 int page_count = obj->size / PAGE_SIZE;
1392 int i;
1393
1394 BUG_ON(obj_priv->pages_refcount == 0);
1395
1396 if (--obj_priv->pages_refcount != 0)
1397 return;
1398
1399 if (obj_priv->tiling_mode != I915_TILING_NONE)
1400 i915_gem_object_save_bit_17_swizzle(obj);
1401
1402 for (i = 0; i < page_count; i++)
1403 if (obj_priv->pages[i] != NULL) {
1404 if (obj_priv->dirty)
1405 set_page_dirty(obj_priv->pages[i]);
1406 mark_page_accessed(obj_priv->pages[i]);
1407 page_cache_release(obj_priv->pages[i]);
1408 }
1409 obj_priv->dirty = 0;
1410
1411 drm_free(obj_priv->pages,
1412 page_count * sizeof(struct page *),
1413 DRM_MEM_DRIVER);
1414 obj_priv->pages = NULL;
1415}
1416
1417static void
1418i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
1419{
1420 struct drm_device *dev = obj->dev;
1421 drm_i915_private_t *dev_priv = dev->dev_private;
1422 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1423
1424 /* Add a reference if we're newly entering the active list. */
1425 if (!obj_priv->active) {
1426 drm_gem_object_reference(obj);
1427 obj_priv->active = 1;
1428 }
1429 /* Move from whatever list we were on to the tail of execution. */
1430 spin_lock(&dev_priv->mm.active_list_lock);
1431 list_move_tail(&obj_priv->list,
1432 &dev_priv->mm.active_list);
1433 spin_unlock(&dev_priv->mm.active_list_lock);
1434 obj_priv->last_rendering_seqno = seqno;
1435}
1436
1437static void
1438i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1439{
1440 struct drm_device *dev = obj->dev;
1441 drm_i915_private_t *dev_priv = dev->dev_private;
1442 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1443
1444 BUG_ON(!obj_priv->active);
1445 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1446 obj_priv->last_rendering_seqno = 0;
1447}
1448
1449static void
1450i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1451{
1452 struct drm_device *dev = obj->dev;
1453 drm_i915_private_t *dev_priv = dev->dev_private;
1454 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1455
1456 i915_verify_inactive(dev, __FILE__, __LINE__);
1457 if (obj_priv->pin_count != 0)
1458 list_del_init(&obj_priv->list);
1459 else
1460 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1461
1462 obj_priv->last_rendering_seqno = 0;
1463 if (obj_priv->active) {
1464 obj_priv->active = 0;
1465 drm_gem_object_unreference(obj);
1466 }
1467 i915_verify_inactive(dev, __FILE__, __LINE__);
1468}
1469
1470/**
1471 * Creates a new sequence number, emitting a write of it to the status page
1472 * plus an interrupt, which will trigger i915_user_interrupt_handler.
1473 *
1474 * Must be called with dev->struct_mutex held.
1475 *
1476 * Returned sequence numbers are nonzero on success.
1477 */
1478static uint32_t
1479i915_add_request(struct drm_device *dev, uint32_t flush_domains)
1480{
1481 drm_i915_private_t *dev_priv = dev->dev_private;
1482 struct drm_i915_gem_request *request;
1483 uint32_t seqno;
1484 int was_empty;
1485 RING_LOCALS;
1486
1487 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
1488 if (request == NULL)
1489 return 0;
1490
1491 /* Grab the seqno we're going to make this request be, and bump the
1492 * next (skipping 0 so it can be the reserved no-seqno value).
1493 */
1494 seqno = dev_priv->mm.next_gem_seqno;
1495 dev_priv->mm.next_gem_seqno++;
1496 if (dev_priv->mm.next_gem_seqno == 0)
1497 dev_priv->mm.next_gem_seqno++;
1498
1499 BEGIN_LP_RING(4);
1500 OUT_RING(MI_STORE_DWORD_INDEX);
1501 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1502 OUT_RING(seqno);
1503
1504 OUT_RING(MI_USER_INTERRUPT);
1505 ADVANCE_LP_RING();
1506
1507 DRM_DEBUG("%d\n", seqno);
1508
1509 request->seqno = seqno;
1510 request->emitted_jiffies = jiffies;
1511 was_empty = list_empty(&dev_priv->mm.request_list);
1512 list_add_tail(&request->list, &dev_priv->mm.request_list);
1513
1514 /* Associate any objects on the flushing list matching the write
1515 * domain we're flushing with our flush.
1516 */
1517 if (flush_domains != 0) {
1518 struct drm_i915_gem_object *obj_priv, *next;
1519
1520 list_for_each_entry_safe(obj_priv, next,
1521 &dev_priv->mm.flushing_list, list) {
1522 struct drm_gem_object *obj = obj_priv->obj;
1523
1524 if ((obj->write_domain & flush_domains) ==
1525 obj->write_domain) {
1526 obj->write_domain = 0;
1527 i915_gem_object_move_to_active(obj, seqno);
1528 }
1529 }
1530
1531 }
1532
1533 if (was_empty && !dev_priv->mm.suspended)
1534 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
1535 return seqno;
1536}
1537
1538/**
1539 * Command execution barrier
1540 *
1541 * Ensures that all commands in the ring are finished
1542 * before signalling the CPU
1543 */
1544static uint32_t
1545i915_retire_commands(struct drm_device *dev)
1546{
1547 drm_i915_private_t *dev_priv = dev->dev_private;
1548 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1549 uint32_t flush_domains = 0;
1550 RING_LOCALS;
1551
1552 /* The sampler always gets flushed on i965 (sigh) */
1553 if (IS_I965G(dev))
1554 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1555 BEGIN_LP_RING(2);
1556 OUT_RING(cmd);
1557 OUT_RING(0); /* noop */
1558 ADVANCE_LP_RING();
1559 return flush_domains;
1560}
1561
1562/**
1563 * Moves buffers associated only with the given active seqno from the active
1564 * to inactive list, potentially freeing them.
1565 */
1566static void
1567i915_gem_retire_request(struct drm_device *dev,
1568 struct drm_i915_gem_request *request)
1569{
1570 drm_i915_private_t *dev_priv = dev->dev_private;
1571
1572 /* Move any buffers on the active list that are no longer referenced
1573 * by the ringbuffer to the flushing/inactive lists as appropriate.
1574 */
1575 spin_lock(&dev_priv->mm.active_list_lock);
1576 while (!list_empty(&dev_priv->mm.active_list)) {
1577 struct drm_gem_object *obj;
1578 struct drm_i915_gem_object *obj_priv;
1579
1580 obj_priv = list_first_entry(&dev_priv->mm.active_list,
1581 struct drm_i915_gem_object,
1582 list);
1583 obj = obj_priv->obj;
1584
1585 /* If the seqno being retired doesn't match the oldest in the
1586 * list, then the oldest in the list must still be newer than
1587 * this seqno.
1588 */
1589 if (obj_priv->last_rendering_seqno != request->seqno)
1590 goto out;
1591
1592#if WATCH_LRU
1593 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1594 __func__, request->seqno, obj);
1595#endif
1596
1597 if (obj->write_domain != 0)
1598 i915_gem_object_move_to_flushing(obj);
1599 else {
1600 /* Take a reference on the object so it won't be
1601 * freed while the spinlock is held. The list
1602 * protection for this spinlock is safe when breaking
1603 * the lock like this since the next thing we do
1604 * is just get the head of the list again.
1605 */
1606 drm_gem_object_reference(obj);
1607 i915_gem_object_move_to_inactive(obj);
1608 spin_unlock(&dev_priv->mm.active_list_lock);
1609 drm_gem_object_unreference(obj);
1610 spin_lock(&dev_priv->mm.active_list_lock);
1611 }
1612 }
1613out:
1614 spin_unlock(&dev_priv->mm.active_list_lock);
1615}
1616
1617/**
1618 * Returns true if seq1 is later than seq2.
1619 */
1620static int
1621i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1622{
1623 return (int32_t)(seq1 - seq2) >= 0;
1624}
1625
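/* The subtraction above is done modulo 2^32 and reinterpreted as signed, so
 * the comparison survives seqno wraparound: e.g. seq1 == 2, seq2 == 0xfffffffc
 * gives seq1 - seq2 == 6 and (int32_t)6 >= 0, so seq1 is still reported as
 * the later sequence number. */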
1626uint32_t
1627i915_get_gem_seqno(struct drm_device *dev)
1628{
1629 drm_i915_private_t *dev_priv = dev->dev_private;
1630
1631 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
1632}
1633
1634/**
1635 * This function clears the request list as sequence numbers are passed.
1636 */
1637void
1638i915_gem_retire_requests(struct drm_device *dev)
1639{
1640 drm_i915_private_t *dev_priv = dev->dev_private;
1641 uint32_t seqno;
1642
1643 if (!dev_priv->hw_status_page)
1644 return;
1645
1646 seqno = i915_get_gem_seqno(dev);
1647
1648 while (!list_empty(&dev_priv->mm.request_list)) {
1649 struct drm_i915_gem_request *request;
1650 uint32_t retiring_seqno;
1651
1652 request = list_first_entry(&dev_priv->mm.request_list,
1653 struct drm_i915_gem_request,
1654 list);
1655 retiring_seqno = request->seqno;
1656
1657 if (i915_seqno_passed(seqno, retiring_seqno) ||
1658 dev_priv->mm.wedged) {
1659 i915_gem_retire_request(dev, request);
1660
1661 list_del(&request->list);
1662 drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
1663 } else
1664 break;
1665 }
1666}
1667
1668void
1669i915_gem_retire_work_handler(struct work_struct *work)
1670{
1671 drm_i915_private_t *dev_priv;
1672 struct drm_device *dev;
1673
1674 dev_priv = container_of(work, drm_i915_private_t,
1675 mm.retire_work.work);
1676 dev = dev_priv->dev;
1677
1678 mutex_lock(&dev->struct_mutex);
1679 i915_gem_retire_requests(dev);
1680 if (!dev_priv->mm.suspended &&
1681 !list_empty(&dev_priv->mm.request_list))
1682 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
1683 mutex_unlock(&dev->struct_mutex);
1684}
1685
1686/**
1687 * Waits for a sequence number to be signaled, and cleans up the
1688 * request and object lists appropriately for that event.
1689 */
1690static int
1691i915_wait_request(struct drm_device *dev, uint32_t seqno)
1692{
1693 drm_i915_private_t *dev_priv = dev->dev_private;
1694 int ret = 0;
1695
1696 BUG_ON(seqno == 0);
1697
1698 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1699 dev_priv->mm.waiting_gem_seqno = seqno;
1700 i915_user_irq_get(dev);
1701 ret = wait_event_interruptible(dev_priv->irq_queue,
1702 i915_seqno_passed(i915_get_gem_seqno(dev),
1703 seqno) ||
1704 dev_priv->mm.wedged);
1705 i915_user_irq_put(dev);
1706 dev_priv->mm.waiting_gem_seqno = 0;
1707 }
1708 if (dev_priv->mm.wedged)
1709 ret = -EIO;
1710
1711 if (ret && ret != -ERESTARTSYS)
1712 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1713 __func__, ret, seqno, i915_get_gem_seqno(dev));
1714
1715 /* Directly dispatch request retiring. While we have the work queue
1716 * to handle this, the waiter on a request often wants an associated
1717 * buffer to have made it to the inactive list, and we would need
1718 * a separate wait queue to handle that.
1719 */
1720 if (ret == 0)
1721 i915_gem_retire_requests(dev);
1722
1723 return ret;
1724}
1725
1726static void
1727i915_gem_flush(struct drm_device *dev,
1728 uint32_t invalidate_domains,
1729 uint32_t flush_domains)
1730{
1731 drm_i915_private_t *dev_priv = dev->dev_private;
1732 uint32_t cmd;
1733 RING_LOCALS;
1734
1735#if WATCH_EXEC
1736 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1737 invalidate_domains, flush_domains);
1738#endif
1739
1740 if (flush_domains & I915_GEM_DOMAIN_CPU)
1741 drm_agp_chipset_flush(dev);
1742
1743 if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
1744 I915_GEM_DOMAIN_GTT)) {
1745 /*
1746 * read/write caches:
1747 *
1748 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
1749 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
1750 * also flushed at 2d versus 3d pipeline switches.
1751 *
1752 * read-only caches:
1753 *
1754 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
1755 * MI_READ_FLUSH is set, and is always flushed on 965.
1756 *
1757 * I915_GEM_DOMAIN_COMMAND may not exist?
1758 *
1759 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1760 * invalidated when MI_EXE_FLUSH is set.
1761 *
1762 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1763 * invalidated with every MI_FLUSH.
1764 *
1765 * TLBs:
1766 *
1767 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
1768 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
1769 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1770 * are flushed at any MI_FLUSH.
1771 */
1772
1773 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1774 if ((invalidate_domains|flush_domains) &
1775 I915_GEM_DOMAIN_RENDER)
1776 cmd &= ~MI_NO_WRITE_FLUSH;
1777 if (!IS_I965G(dev)) {
1778 /*
1779 * On the 965, the sampler cache always gets flushed
1780 * and this bit is reserved.
1781 */
1782 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1783 cmd |= MI_READ_FLUSH;
1784 }
1785 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1786 cmd |= MI_EXE_FLUSH;
1787
1788#if WATCH_EXEC
1789 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
1790#endif
1791 BEGIN_LP_RING(2);
1792 OUT_RING(cmd);
1793 OUT_RING(0); /* noop */
1794 ADVANCE_LP_RING();
1795 }
1796}
1797
1798/**
1799 * Ensures that all rendering to the object has completed and the object is
1800 * safe to unbind from the GTT or access from the CPU.
1801 */
1802static int
1803i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1804{
1805 struct drm_device *dev = obj->dev;
1806 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1807 int ret;
1808
1809 /* This function only exists to support waiting for existing rendering,
1810 * not for emitting required flushes.
1811 */
1812 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
1813
1814 /* If there is rendering queued on the buffer being evicted, wait for
1815 * it.
1816 */
1817 if (obj_priv->active) {
1818#if WATCH_BUF
1819 DRM_INFO("%s: object %p wait for seqno %08x\n",
1820 __func__, obj, obj_priv->last_rendering_seqno);
1821#endif
1822 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
1823 if (ret != 0)
1824 return ret;
1825 }
1826
1827 return 0;
1828}
1829
1830/**
1831 * Unbinds an object from the GTT aperture.
1832 */
0f973f27 1833int
673a394b
EA
1834i915_gem_object_unbind(struct drm_gem_object *obj)
1835{
1836 struct drm_device *dev = obj->dev;
1837 struct drm_i915_gem_object *obj_priv = obj->driver_private;
de151cf6 1838 loff_t offset;
673a394b
EA
1839 int ret = 0;
1840
1841#if WATCH_BUF
1842 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1843 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1844#endif
1845 if (obj_priv->gtt_space == NULL)
1846 return 0;
1847
1848 if (obj_priv->pin_count != 0) {
1849 DRM_ERROR("Attempting to unbind pinned buffer\n");
1850 return -EINVAL;
1851 }
1852
673a394b
EA
1853 /* Move the object to the CPU domain to ensure that
1854 * any possible CPU writes while it's not in the GTT
1855 * are flushed when we go to remap it. This will
1856 * also ensure that all pending GPU writes are finished
1857 * before we unbind.
1858 */
e47c68e9 1859 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
673a394b 1860 if (ret) {
e47c68e9
EA
1861 if (ret != -ERESTARTSYS)
1862 DRM_ERROR("set_domain failed: %d\n", ret);
673a394b
EA
1863 return ret;
1864 }
1865
1866 if (obj_priv->agp_mem != NULL) {
1867 drm_unbind_agp(obj_priv->agp_mem);
1868 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
1869 obj_priv->agp_mem = NULL;
1870 }
1871
1872 BUG_ON(obj_priv->active);
1873
de151cf6
JB
1874 /* blow away mappings if mapped through GTT */
1875 offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
79e53945
JB
1876 if (dev->dev_mapping)
1877 unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
de151cf6
JB
1878
1879 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1880 i915_gem_clear_fence_reg(obj);
1881
856fa198 1882 i915_gem_object_put_pages(obj);
673a394b
EA
1883
1884 if (obj_priv->gtt_space) {
1885 atomic_dec(&dev->gtt_count);
1886 atomic_sub(obj->size, &dev->gtt_memory);
1887
1888 drm_mm_put_block(obj_priv->gtt_space);
1889 obj_priv->gtt_space = NULL;
1890 }
1891
1892 /* Remove ourselves from the LRU list if present. */
1893 if (!list_empty(&obj_priv->list))
1894 list_del_init(&obj_priv->list);
1895
1896 return 0;
1897}
1898
1899static int
1900i915_gem_evict_something(struct drm_device *dev)
1901{
1902 drm_i915_private_t *dev_priv = dev->dev_private;
1903 struct drm_gem_object *obj;
1904 struct drm_i915_gem_object *obj_priv;
1905 int ret = 0;
1906
1907 for (;;) {
1908 /* If there's an inactive buffer available now, grab it
1909 * and be done.
1910 */
1911 if (!list_empty(&dev_priv->mm.inactive_list)) {
1912 obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
1913 struct drm_i915_gem_object,
1914 list);
1915 obj = obj_priv->obj;
1916 BUG_ON(obj_priv->pin_count != 0);
1917#if WATCH_LRU
1918 DRM_INFO("%s: evicting %p\n", __func__, obj);
1919#endif
1920 BUG_ON(obj_priv->active);
1921
1922 /* Wait on the rendering and unbind the buffer. */
1923 ret = i915_gem_object_unbind(obj);
1924 break;
1925 }
1926
1927 /* If we didn't get anything, but the ring is still processing
1928 * things, wait for one of those things to finish and hopefully
1929 * leave us a buffer to evict.
1930 */
1931 if (!list_empty(&dev_priv->mm.request_list)) {
1932 struct drm_i915_gem_request *request;
1933
1934 request = list_first_entry(&dev_priv->mm.request_list,
1935 struct drm_i915_gem_request,
1936 list);
1937
1938 ret = i915_wait_request(dev, request->seqno);
1939 if (ret)
1940 break;
1941
1942 /* if waiting caused an object to become inactive,
 1943 * then loop around and grab it. Otherwise, we
1944 * assume that waiting freed and unbound something,
1945 * so there should now be some space in the GTT
1946 */
1947 if (!list_empty(&dev_priv->mm.inactive_list))
1948 continue;
1949 break;
1950 }
1951
1952 /* If we didn't have anything on the request list but there
1953 * are buffers awaiting a flush, emit one and try again.
1954 * When we wait on it, those buffers waiting for that flush
1955 * will get moved to inactive.
1956 */
1957 if (!list_empty(&dev_priv->mm.flushing_list)) {
1958 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1959 struct drm_i915_gem_object,
1960 list);
1961 obj = obj_priv->obj;
1962
1963 i915_gem_flush(dev,
1964 obj->write_domain,
1965 obj->write_domain);
1966 i915_add_request(dev, obj->write_domain);
1967
1968 obj = NULL;
1969 continue;
1970 }
1971
1972 DRM_ERROR("inactive empty %d request empty %d "
1973 "flushing empty %d\n",
1974 list_empty(&dev_priv->mm.inactive_list),
1975 list_empty(&dev_priv->mm.request_list),
1976 list_empty(&dev_priv->mm.flushing_list));
1977 /* If we didn't do any of the above, there's nothing to be done
1978 * and we just can't fit it in.
1979 */
1980 return -ENOMEM;
1981 }
1982 return ret;
1983}
1984
ac94a962
KP
1985static int
1986i915_gem_evict_everything(struct drm_device *dev)
1987{
1988 int ret;
1989
1990 for (;;) {
1991 ret = i915_gem_evict_something(dev);
1992 if (ret != 0)
1993 break;
1994 }
15c35334
OA
1995 if (ret == -ENOMEM)
1996 return 0;
ac94a962
KP
1997 return ret;
1998}
1999
6911a9b8 2000int
856fa198 2001i915_gem_object_get_pages(struct drm_gem_object *obj)
673a394b
EA
2002{
2003 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2004 int page_count, i;
2005 struct address_space *mapping;
2006 struct inode *inode;
2007 struct page *page;
2008 int ret;
2009
856fa198 2010 if (obj_priv->pages_refcount++ != 0)
673a394b
EA
2011 return 0;
2012
2013 /* Get the list of pages out of our struct file. They'll be pinned
2014 * at this point until we release them.
2015 */
2016 page_count = obj->size / PAGE_SIZE;
856fa198
EA
2017 BUG_ON(obj_priv->pages != NULL);
2018 obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
2019 DRM_MEM_DRIVER);
2020 if (obj_priv->pages == NULL) {
673a394b 2021 DRM_ERROR("Faled to allocate page list\n");
856fa198 2022 obj_priv->pages_refcount--;
673a394b
EA
2023 return -ENOMEM;
2024 }
2025
2026 inode = obj->filp->f_path.dentry->d_inode;
2027 mapping = inode->i_mapping;
2028 for (i = 0; i < page_count; i++) {
2029 page = read_mapping_page(mapping, i, NULL);
2030 if (IS_ERR(page)) {
2031 ret = PTR_ERR(page);
2032 DRM_ERROR("read_mapping_page failed: %d\n", ret);
856fa198 2033 i915_gem_object_put_pages(obj);
673a394b
EA
2034 return ret;
2035 }
856fa198 2036 obj_priv->pages[i] = page;
673a394b 2037 }
280b713b
EA
2038
2039 if (obj_priv->tiling_mode != I915_TILING_NONE)
2040 i915_gem_object_do_bit_17_swizzle(obj);
2041
673a394b
EA
2042 return 0;
2043}
2044
de151cf6
JB
2045static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2046{
2047 struct drm_gem_object *obj = reg->obj;
2048 struct drm_device *dev = obj->dev;
2049 drm_i915_private_t *dev_priv = dev->dev_private;
2050 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2051 int regnum = obj_priv->fence_reg;
2052 uint64_t val;
2053
2054 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2055 0xfffff000) << 32;
2056 val |= obj_priv->gtt_offset & 0xfffff000;
2057 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2058 if (obj_priv->tiling_mode == I915_TILING_Y)
2059 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2060 val |= I965_FENCE_REG_VALID;
2061
2062 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2063}
2064
2065static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2066{
2067 struct drm_gem_object *obj = reg->obj;
2068 struct drm_device *dev = obj->dev;
2069 drm_i915_private_t *dev_priv = dev->dev_private;
2070 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2071 int regnum = obj_priv->fence_reg;
0f973f27 2072 int tile_width;
dc529a4f 2073 uint32_t fence_reg, val;
de151cf6
JB
2074 uint32_t pitch_val;
2075
2076 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2077 (obj_priv->gtt_offset & (obj->size - 1))) {
f06da264 2078 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
0f973f27 2079 __func__, obj_priv->gtt_offset, obj->size);
de151cf6
JB
2080 return;
2081 }
2082
0f973f27
JB
2083 if (obj_priv->tiling_mode == I915_TILING_Y &&
2084 HAS_128_BYTE_Y_TILING(dev))
2085 tile_width = 128;
de151cf6 2086 else
0f973f27
JB
2087 tile_width = 512;
2088
 2089 /* Note: the pitch must be a power-of-two number of tile widths */
2090 pitch_val = obj_priv->stride / tile_width;
2091 pitch_val = ffs(pitch_val) - 1;
de151cf6
JB
2092
2093 val = obj_priv->gtt_offset;
2094 if (obj_priv->tiling_mode == I915_TILING_Y)
2095 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2096 val |= I915_FENCE_SIZE_BITS(obj->size);
2097 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2098 val |= I830_FENCE_REG_VALID;
2099
dc529a4f
EA
2100 if (regnum < 8)
2101 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2102 else
2103 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2104 I915_WRITE(fence_reg, val);
de151cf6
JB
2105}
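As a worked example of the pitch encoding above (values chosen purely for illustration): an X-tiled object with a 2048-byte stride on a part using 512-byte tile widths gives

/*
 *	pitch_val = 2048 / 512 = 4 tile widths
 *	pitch_val = ffs(4) - 1 = 2
 * so the fence register's pitch field stores log2 of the pitch in tile
 * widths, which is why the stride has to be a power-of-two multiple of
 * the tile width.
 */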
2106
2107static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2108{
2109 struct drm_gem_object *obj = reg->obj;
2110 struct drm_device *dev = obj->dev;
2111 drm_i915_private_t *dev_priv = dev->dev_private;
2112 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2113 int regnum = obj_priv->fence_reg;
2114 uint32_t val;
2115 uint32_t pitch_val;
8d7773a3 2116 uint32_t fence_size_bits;
de151cf6 2117
8d7773a3 2118 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
de151cf6 2119 (obj_priv->gtt_offset & (obj->size - 1))) {
8d7773a3 2120 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
0f973f27 2121 __func__, obj_priv->gtt_offset);
de151cf6
JB
2122 return;
2123 }
2124
2125 pitch_val = (obj_priv->stride / 128) - 1;
8d7773a3 2126 WARN_ON(pitch_val & ~0x0000000f);
de151cf6
JB
2127 val = obj_priv->gtt_offset;
2128 if (obj_priv->tiling_mode == I915_TILING_Y)
2129 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
8d7773a3
DV
2130 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2131 WARN_ON(fence_size_bits & ~0x00000f00);
2132 val |= fence_size_bits;
de151cf6
JB
2133 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2134 val |= I830_FENCE_REG_VALID;
2135
2136 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2137
2138}
2139
2140/**
2141 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2142 * @obj: object to map through a fence reg
0f973f27 2143 * @write: object is about to be written
de151cf6
JB
2144 *
2145 * When mapping objects through the GTT, userspace wants to be able to write
2146 * to them without having to worry about swizzling if the object is tiled.
2147 *
2148 * This function walks the fence regs looking for a free one for @obj,
2149 * stealing one if it can't find any.
2150 *
2151 * It then sets up the reg based on the object's properties: address, pitch
2152 * and tiling format.
2153 */
d9ddcb96 2154static int
0f973f27 2155i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
de151cf6
JB
2156{
2157 struct drm_device *dev = obj->dev;
79e53945 2158 struct drm_i915_private *dev_priv = dev->dev_private;
de151cf6
JB
2159 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2160 struct drm_i915_fence_reg *reg = NULL;
fc7170ba
CW
2161 struct drm_i915_gem_object *old_obj_priv = NULL;
2162 int i, ret, avail;
de151cf6
JB
2163
2164 switch (obj_priv->tiling_mode) {
2165 case I915_TILING_NONE:
2166 WARN(1, "allocating a fence for non-tiled object?\n");
2167 break;
2168 case I915_TILING_X:
0f973f27
JB
2169 if (!obj_priv->stride)
2170 return -EINVAL;
2171 WARN((obj_priv->stride & (512 - 1)),
2172 "object 0x%08x is X tiled but has non-512B pitch\n",
2173 obj_priv->gtt_offset);
de151cf6
JB
2174 break;
2175 case I915_TILING_Y:
0f973f27
JB
2176 if (!obj_priv->stride)
2177 return -EINVAL;
2178 WARN((obj_priv->stride & (128 - 1)),
2179 "object 0x%08x is Y tiled but has non-128B pitch\n",
2180 obj_priv->gtt_offset);
de151cf6
JB
2181 break;
2182 }
2183
2184 /* First try to find a free reg */
9b2412f9 2185try_again:
fc7170ba 2186 avail = 0;
de151cf6
JB
2187 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2188 reg = &dev_priv->fence_regs[i];
2189 if (!reg->obj)
2190 break;
fc7170ba
CW
2191
2192 old_obj_priv = reg->obj->driver_private;
2193 if (!old_obj_priv->pin_count)
2194 avail++;
de151cf6
JB
2195 }
2196
2197 /* None available, try to steal one or wait for a user to finish */
2198 if (i == dev_priv->num_fence_regs) {
d7619c4b 2199 uint32_t seqno = dev_priv->mm.next_gem_seqno;
de151cf6
JB
2200 loff_t offset;
2201
fc7170ba
CW
2202 if (avail == 0)
2203 return -ENOMEM;
2204
de151cf6
JB
2205 for (i = dev_priv->fence_reg_start;
2206 i < dev_priv->num_fence_regs; i++) {
d7619c4b
CW
2207 uint32_t this_seqno;
2208
de151cf6
JB
2209 reg = &dev_priv->fence_regs[i];
2210 old_obj_priv = reg->obj->driver_private;
d7619c4b
CW
2211
2212 if (old_obj_priv->pin_count)
2213 continue;
2214
2215 /* i915 uses fences for GPU access to tiled buffers */
2216 if (IS_I965G(dev) || !old_obj_priv->active)
de151cf6 2217 break;
d7619c4b
CW
2218
2219 /* find the seqno of the first available fence */
2220 this_seqno = old_obj_priv->last_rendering_seqno;
2221 if (this_seqno != 0 &&
2222 reg->obj->write_domain == 0 &&
2223 i915_seqno_passed(seqno, this_seqno))
2224 seqno = this_seqno;
de151cf6
JB
2225 }
2226
2227 /*
2228 * Now things get ugly... we have to wait for one of the
2229 * objects to finish before trying again.
2230 */
2231 if (i == dev_priv->num_fence_regs) {
d7619c4b
CW
2232 if (seqno == dev_priv->mm.next_gem_seqno) {
2233 i915_gem_flush(dev,
2234 I915_GEM_GPU_DOMAINS,
2235 I915_GEM_GPU_DOMAINS);
2236 seqno = i915_add_request(dev,
2237 I915_GEM_GPU_DOMAINS);
2238 if (seqno == 0)
2239 return -ENOMEM;
de151cf6 2240 }
d7619c4b
CW
2241
2242 ret = i915_wait_request(dev, seqno);
2243 if (ret)
2244 return ret;
de151cf6
JB
2245 goto try_again;
2246 }
2247
d7619c4b
CW
2248 BUG_ON(old_obj_priv->active ||
2249 (reg->obj->write_domain & I915_GEM_GPU_DOMAINS));
2250
de151cf6
JB
2251 /*
2252 * Zap this virtual mapping so we can set up a fence again
2253 * for this object next time we need it.
2254 */
2255 offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
79e53945
JB
2256 if (dev->dev_mapping)
2257 unmap_mapping_range(dev->dev_mapping, offset,
2258 reg->obj->size, 1);
de151cf6
JB
2259 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
2260 }
2261
2262 obj_priv->fence_reg = i;
2263 reg->obj = obj;
2264
2265 if (IS_I965G(dev))
2266 i965_write_fence_reg(reg);
2267 else if (IS_I9XX(dev))
2268 i915_write_fence_reg(reg);
2269 else
2270 i830_write_fence_reg(reg);
d9ddcb96
EA
2271
2272 return 0;
de151cf6
JB
2273}
2274
2275/**
2276 * i915_gem_clear_fence_reg - clear out fence register info
2277 * @obj: object to clear
2278 *
2279 * Zeroes out the fence register itself and clears out the associated
2280 * data structures in dev_priv and obj_priv.
2281 */
2282static void
2283i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2284{
2285 struct drm_device *dev = obj->dev;
79e53945 2286 drm_i915_private_t *dev_priv = dev->dev_private;
de151cf6
JB
2287 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2288
2289 if (IS_I965G(dev))
2290 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
dc529a4f
EA
2291 else {
2292 uint32_t fence_reg;
2293
2294 if (obj_priv->fence_reg < 8)
2295 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2296 else
2297 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
2298 8) * 4;
2299
2300 I915_WRITE(fence_reg, 0);
2301 }
de151cf6
JB
2302
2303 dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
2304 obj_priv->fence_reg = I915_FENCE_REG_NONE;
2305}
2306
673a394b
EA
2307/**
2308 * Finds free space in the GTT aperture and binds the object there.
2309 */
2310static int
2311i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2312{
2313 struct drm_device *dev = obj->dev;
2314 drm_i915_private_t *dev_priv = dev->dev_private;
2315 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2316 struct drm_mm_node *free_space;
2317 int page_count, ret;
2318
9bb2d6f9
EA
2319 if (dev_priv->mm.suspended)
2320 return -EBUSY;
673a394b 2321 if (alignment == 0)
0f973f27 2322 alignment = i915_gem_get_gtt_alignment(obj);
8d7773a3 2323 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
673a394b
EA
2324 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2325 return -EINVAL;
2326 }
2327
2328 search_free:
2329 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2330 obj->size, alignment, 0);
2331 if (free_space != NULL) {
2332 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2333 alignment);
2334 if (obj_priv->gtt_space != NULL) {
2335 obj_priv->gtt_space->private = obj;
2336 obj_priv->gtt_offset = obj_priv->gtt_space->start;
2337 }
2338 }
2339 if (obj_priv->gtt_space == NULL) {
5e118f41
CW
2340 bool lists_empty;
2341
673a394b
EA
2342 /* If the gtt is empty and we're still having trouble
2343 * fitting our object in, we're out of memory.
2344 */
2345#if WATCH_LRU
2346 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2347#endif
5e118f41
CW
2348 spin_lock(&dev_priv->mm.active_list_lock);
2349 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2350 list_empty(&dev_priv->mm.flushing_list) &&
2351 list_empty(&dev_priv->mm.active_list));
2352 spin_unlock(&dev_priv->mm.active_list_lock);
2353 if (lists_empty) {
673a394b
EA
2354 DRM_ERROR("GTT full, but LRU list empty\n");
2355 return -ENOMEM;
2356 }
2357
2358 ret = i915_gem_evict_something(dev);
2359 if (ret != 0) {
ac94a962
KP
2360 if (ret != -ERESTARTSYS)
2361 DRM_ERROR("Failed to evict a buffer %d\n", ret);
673a394b
EA
2362 return ret;
2363 }
2364 goto search_free;
2365 }
2366
2367#if WATCH_BUF
2368 DRM_INFO("Binding object of size %d at 0x%08x\n",
2369 obj->size, obj_priv->gtt_offset);
2370#endif
856fa198 2371 ret = i915_gem_object_get_pages(obj);
673a394b
EA
2372 if (ret) {
2373 drm_mm_put_block(obj_priv->gtt_space);
2374 obj_priv->gtt_space = NULL;
2375 return ret;
2376 }
2377
2378 page_count = obj->size / PAGE_SIZE;
2379 /* Create an AGP memory structure pointing at our pages, and bind it
2380 * into the GTT.
2381 */
2382 obj_priv->agp_mem = drm_agp_bind_pages(dev,
856fa198 2383 obj_priv->pages,
673a394b 2384 page_count,
ba1eb1d8
KP
2385 obj_priv->gtt_offset,
2386 obj_priv->agp_type);
673a394b 2387 if (obj_priv->agp_mem == NULL) {
856fa198 2388 i915_gem_object_put_pages(obj);
673a394b
EA
2389 drm_mm_put_block(obj_priv->gtt_space);
2390 obj_priv->gtt_space = NULL;
2391 return -ENOMEM;
2392 }
2393 atomic_inc(&dev->gtt_count);
2394 atomic_add(obj->size, &dev->gtt_memory);
2395
2396 /* Assert that the object is not currently in any GPU domain. As it
2397 * wasn't in the GTT, there shouldn't be any way it could have been in
2398 * a GPU cache
2399 */
2400 BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2401 BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2402
2403 return 0;
2404}
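The alignment validation near the top of this function relies on the required GTT alignment being a power of two. A minimal illustrative restatement of that test follows (example_valid_alignment is a hypothetical helper, not driver code):

#include <stdint.h>

/* Illustrative only: a requested alignment is acceptable iff it is a
 * multiple of the required power-of-two alignment (0 means "use the
 * default alignment"). */
static int example_valid_alignment(uint32_t requested, uint32_t required)
{
	if (requested == 0)
		requested = required;
	return (requested & (required - 1)) == 0;
}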
2405
2406void
2407i915_gem_clflush_object(struct drm_gem_object *obj)
2408{
2409 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2410
2411 /* If we don't have a page list set up, then we're not pinned
2412 * to GPU, and we can ignore the cache flush because it'll happen
2413 * again at bind time.
2414 */
856fa198 2415 if (obj_priv->pages == NULL)
673a394b
EA
2416 return;
2417
856fa198 2418 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
673a394b
EA
2419}
2420
e47c68e9
EA
2421/** Flushes any GPU write domain for the object if it's dirty. */
2422static void
2423i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2424{
2425 struct drm_device *dev = obj->dev;
2426 uint32_t seqno;
2427
2428 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2429 return;
2430
2431 /* Queue the GPU write cache flushing we need. */
2432 i915_gem_flush(dev, 0, obj->write_domain);
2433 seqno = i915_add_request(dev, obj->write_domain);
2434 obj->write_domain = 0;
2435 i915_gem_object_move_to_active(obj, seqno);
2436}
2437
2438/** Flushes the GTT write domain for the object if it's dirty. */
2439static void
2440i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2441{
2442 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2443 return;
2444
2445 /* No actual flushing is required for the GTT write domain. Writes
2446 * to it immediately go to main memory as far as we know, so there's
2447 * no chipset flush. It also doesn't land in render cache.
2448 */
2449 obj->write_domain = 0;
2450}
2451
2452/** Flushes the CPU write domain for the object if it's dirty. */
2453static void
2454i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2455{
2456 struct drm_device *dev = obj->dev;
2457
2458 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2459 return;
2460
2461 i915_gem_clflush_object(obj);
2462 drm_agp_chipset_flush(dev);
2463 obj->write_domain = 0;
2464}
2465
2ef7eeaa
EA
2466/**
2467 * Moves a single object to the GTT read, and possibly write domain.
2468 *
2469 * This function returns when the move is complete, including waiting on
2470 * flushes to occur.
2471 */
79e53945 2472int
2ef7eeaa
EA
2473i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2474{
2ef7eeaa 2475 struct drm_i915_gem_object *obj_priv = obj->driver_private;
e47c68e9 2476 int ret;
2ef7eeaa 2477
02354392
EA
2478 /* Not valid to be called on unbound objects. */
2479 if (obj_priv->gtt_space == NULL)
2480 return -EINVAL;
2481
e47c68e9
EA
2482 i915_gem_object_flush_gpu_write_domain(obj);
2483 /* Wait on any GPU rendering and flushing to occur. */
2484 ret = i915_gem_object_wait_rendering(obj);
2485 if (ret != 0)
2486 return ret;
2487
2488 /* If we're writing through the GTT domain, then CPU and GPU caches
2489 * will need to be invalidated at next use.
2ef7eeaa 2490 */
e47c68e9
EA
2491 if (write)
2492 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2ef7eeaa 2493
e47c68e9 2494 i915_gem_object_flush_cpu_write_domain(obj);
2ef7eeaa 2495
e47c68e9
EA
2496 /* It should now be out of any other write domains, and we can update
2497 * the domain values for our changes.
2498 */
2499 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2500 obj->read_domains |= I915_GEM_DOMAIN_GTT;
2501 if (write) {
2502 obj->write_domain = I915_GEM_DOMAIN_GTT;
2503 obj_priv->dirty = 1;
2ef7eeaa
EA
2504 }
2505
e47c68e9
EA
2506 return 0;
2507}
2508
2509/**
2510 * Moves a single object to the CPU read, and possibly write domain.
2511 *
2512 * This function returns when the move is complete, including waiting on
2513 * flushes to occur.
2514 */
2515static int
2516i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2517{
e47c68e9
EA
2518 int ret;
2519
2520 i915_gem_object_flush_gpu_write_domain(obj);
2ef7eeaa 2521 /* Wait on any GPU rendering and flushing to occur. */
e47c68e9
EA
2522 ret = i915_gem_object_wait_rendering(obj);
2523 if (ret != 0)
2524 return ret;
2ef7eeaa 2525
e47c68e9 2526 i915_gem_object_flush_gtt_write_domain(obj);
2ef7eeaa 2527
e47c68e9
EA
2528 /* If we have a partially-valid cache of the object in the CPU,
2529 * finish invalidating it and free the per-page flags.
2ef7eeaa 2530 */
e47c68e9 2531 i915_gem_object_set_to_full_cpu_read_domain(obj);
2ef7eeaa 2532
e47c68e9
EA
2533 /* Flush the CPU cache if it's still invalid. */
2534 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2ef7eeaa 2535 i915_gem_clflush_object(obj);
2ef7eeaa 2536
e47c68e9 2537 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2ef7eeaa
EA
2538 }
2539
2540 /* It should now be out of any other write domains, and we can update
2541 * the domain values for our changes.
2542 */
e47c68e9
EA
2543 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2544
2545 /* If we're writing through the CPU, then the GPU read domains will
2546 * need to be invalidated at next use.
2547 */
2548 if (write) {
2549 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2550 obj->write_domain = I915_GEM_DOMAIN_CPU;
2551 }
2ef7eeaa
EA
2552
2553 return 0;
2554}
2555
673a394b
EA
2556/*
2557 * Set the next domain for the specified object. This
 2558 * may not actually perform the necessary flushing/invalidating though,
2559 * as that may want to be batched with other set_domain operations
2560 *
2561 * This is (we hope) the only really tricky part of gem. The goal
2562 * is fairly simple -- track which caches hold bits of the object
2563 * and make sure they remain coherent. A few concrete examples may
2564 * help to explain how it works. For shorthand, we use the notation
 2565 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2566 * a pair of read and write domain masks.
2567 *
2568 * Case 1: the batch buffer
2569 *
2570 * 1. Allocated
2571 * 2. Written by CPU
2572 * 3. Mapped to GTT
2573 * 4. Read by GPU
2574 * 5. Unmapped from GTT
2575 * 6. Freed
2576 *
2577 * Let's take these a step at a time
2578 *
2579 * 1. Allocated
2580 * Pages allocated from the kernel may still have
2581 * cache contents, so we set them to (CPU, CPU) always.
2582 * 2. Written by CPU (using pwrite)
2583 * The pwrite function calls set_domain (CPU, CPU) and
2584 * this function does nothing (as nothing changes)
 2585 * 3. Mapped to GTT
2586 * This function asserts that the object is not
2587 * currently in any GPU-based read or write domains
2588 * 4. Read by GPU
2589 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2590 * As write_domain is zero, this function adds in the
2591 * current read domains (CPU+COMMAND, 0).
2592 * flush_domains is set to CPU.
2593 * invalidate_domains is set to COMMAND
2594 * clflush is run to get data out of the CPU caches
2595 * then i915_dev_set_domain calls i915_gem_flush to
2596 * emit an MI_FLUSH and drm_agp_chipset_flush
2597 * 5. Unmapped from GTT
2598 * i915_gem_object_unbind calls set_domain (CPU, CPU)
2599 * flush_domains and invalidate_domains end up both zero
2600 * so no flushing/invalidating happens
2601 * 6. Freed
2602 * yay, done
2603 *
2604 * Case 2: The shared render buffer
2605 *
2606 * 1. Allocated
2607 * 2. Mapped to GTT
2608 * 3. Read/written by GPU
2609 * 4. set_domain to (CPU,CPU)
2610 * 5. Read/written by CPU
2611 * 6. Read/written by GPU
2612 *
2613 * 1. Allocated
2614 * Same as last example, (CPU, CPU)
2615 * 2. Mapped to GTT
2616 * Nothing changes (assertions find that it is not in the GPU)
2617 * 3. Read/written by GPU
2618 * execbuffer calls set_domain (RENDER, RENDER)
2619 * flush_domains gets CPU
2620 * invalidate_domains gets GPU
2621 * clflush (obj)
2622 * MI_FLUSH and drm_agp_chipset_flush
2623 * 4. set_domain (CPU, CPU)
2624 * flush_domains gets GPU
2625 * invalidate_domains gets CPU
2626 * wait_rendering (obj) to make sure all drawing is complete.
2627 * This will include an MI_FLUSH to get the data from GPU
2628 * to memory
2629 * clflush (obj) to invalidate the CPU cache
2630 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2631 * 5. Read/written by CPU
2632 * cache lines are loaded and dirtied
 2633 * 6. Read/written by GPU
2634 * Same as last GPU access
2635 *
2636 * Case 3: The constant buffer
2637 *
2638 * 1. Allocated
2639 * 2. Written by CPU
2640 * 3. Read by GPU
2641 * 4. Updated (written) by CPU again
2642 * 5. Read by GPU
2643 *
2644 * 1. Allocated
2645 * (CPU, CPU)
2646 * 2. Written by CPU
2647 * (CPU, CPU)
2648 * 3. Read by GPU
2649 * (CPU+RENDER, 0)
2650 * flush_domains = CPU
2651 * invalidate_domains = RENDER
2652 * clflush (obj)
2653 * MI_FLUSH
2654 * drm_agp_chipset_flush
2655 * 4. Updated (written) by CPU again
2656 * (CPU, CPU)
2657 * flush_domains = 0 (no previous write domain)
2658 * invalidate_domains = 0 (no new read domains)
2659 * 5. Read by GPU
2660 * (CPU+RENDER, 0)
2661 * flush_domains = CPU
2662 * invalidate_domains = RENDER
2663 * clflush (obj)
2664 * MI_FLUSH
2665 * drm_agp_chipset_flush
2666 */
c0d90829 2667static void
8b0e378a 2668i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
673a394b
EA
2669{
2670 struct drm_device *dev = obj->dev;
2671 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2672 uint32_t invalidate_domains = 0;
2673 uint32_t flush_domains = 0;
e47c68e9 2674
8b0e378a
EA
2675 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2676 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
673a394b
EA
2677
2678#if WATCH_BUF
2679 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2680 __func__, obj,
8b0e378a
EA
2681 obj->read_domains, obj->pending_read_domains,
2682 obj->write_domain, obj->pending_write_domain);
673a394b
EA
2683#endif
2684 /*
2685 * If the object isn't moving to a new write domain,
2686 * let the object stay in multiple read domains
2687 */
8b0e378a
EA
2688 if (obj->pending_write_domain == 0)
2689 obj->pending_read_domains |= obj->read_domains;
673a394b
EA
2690 else
2691 obj_priv->dirty = 1;
2692
2693 /*
2694 * Flush the current write domain if
2695 * the new read domains don't match. Invalidate
2696 * any read domains which differ from the old
2697 * write domain
2698 */
8b0e378a
EA
2699 if (obj->write_domain &&
2700 obj->write_domain != obj->pending_read_domains) {
673a394b 2701 flush_domains |= obj->write_domain;
8b0e378a
EA
2702 invalidate_domains |=
2703 obj->pending_read_domains & ~obj->write_domain;
673a394b
EA
2704 }
2705 /*
2706 * Invalidate any read caches which may have
2707 * stale data. That is, any new read domains.
2708 */
8b0e378a 2709 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
673a394b
EA
2710 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
2711#if WATCH_BUF
2712 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
2713 __func__, flush_domains, invalidate_domains);
2714#endif
673a394b
EA
2715 i915_gem_clflush_object(obj);
2716 }
2717
efbeed96
EA
2718 /* The actual obj->write_domain will be updated with
2719 * pending_write_domain after we emit the accumulated flush for all
2720 * of our domain changes in execbuffers (which clears objects'
2721 * write_domains). So if we have a current write domain that we
2722 * aren't changing, set pending_write_domain to that.
2723 */
2724 if (flush_domains == 0 && obj->pending_write_domain == 0)
2725 obj->pending_write_domain = obj->write_domain;
8b0e378a 2726 obj->read_domains = obj->pending_read_domains;
673a394b
EA
2727
2728 dev->invalidate_domains |= invalidate_domains;
2729 dev->flush_domains |= flush_domains;
2730#if WATCH_BUF
2731 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
2732 __func__,
2733 obj->read_domains, obj->write_domain,
2734 dev->invalidate_domains, dev->flush_domains);
2735#endif
673a394b
EA
2736}
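The per-object domain arithmetic implemented above can be read as a pure function of four masks. The sketch below is illustrative only (example_domain_transition is a hypothetical name, not part of the driver); feeding it Case 1 step 4 from the comment above (read = CPU, write = CPU, pending_read = COMMAND, pending_write = 0) yields flush = CPU and invalidate = COMMAND, as that walkthrough describes.

/* Illustrative sketch of the flush/invalidate computation above. */
static void example_domain_transition(uint32_t read, uint32_t write,
				      uint32_t pending_read, uint32_t pending_write,
				      uint32_t *flush, uint32_t *invalidate)
{
	*flush = 0;
	*invalidate = 0;

	if (pending_write == 0)
		pending_read |= read;	/* no new writer: keep the old read domains */

	if (write && write != pending_read) {
		*flush |= write;			/* old write cache must be flushed   */
		*invalidate |= pending_read & ~write;	/* its new readers are now stale     */
	}
	*invalidate |= pending_read & ~read;		/* brand-new read domains are stale  */
}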
2737
2738/**
e47c68e9 2739 * Moves the object from a partially CPU read to a full one.
673a394b 2740 *
e47c68e9
EA
2741 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
2742 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
673a394b 2743 */
e47c68e9
EA
2744static void
2745i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
673a394b
EA
2746{
2747 struct drm_i915_gem_object *obj_priv = obj->driver_private;
673a394b 2748
e47c68e9
EA
2749 if (!obj_priv->page_cpu_valid)
2750 return;
2751
2752 /* If we're partially in the CPU read domain, finish moving it in.
2753 */
2754 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
2755 int i;
2756
2757 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
2758 if (obj_priv->page_cpu_valid[i])
2759 continue;
856fa198 2760 drm_clflush_pages(obj_priv->pages + i, 1);
e47c68e9 2761 }
e47c68e9
EA
2762 }
2763
2764 /* Free the page_cpu_valid mappings which are now stale, whether
2765 * or not we've got I915_GEM_DOMAIN_CPU.
2766 */
2767 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
2768 DRM_MEM_DRIVER);
2769 obj_priv->page_cpu_valid = NULL;
2770}
2771
2772/**
2773 * Set the CPU read domain on a range of the object.
2774 *
2775 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
2776 * not entirely valid. The page_cpu_valid member of the object flags which
2777 * pages have been flushed, and will be respected by
2778 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
2779 * of the whole object.
2780 *
2781 * This function returns when the move is complete, including waiting on
2782 * flushes to occur.
2783 */
2784static int
2785i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2786 uint64_t offset, uint64_t size)
2787{
2788 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2789 int i, ret;
673a394b 2790
e47c68e9
EA
2791 if (offset == 0 && size == obj->size)
2792 return i915_gem_object_set_to_cpu_domain(obj, 0);
673a394b 2793
e47c68e9
EA
2794 i915_gem_object_flush_gpu_write_domain(obj);
2795 /* Wait on any GPU rendering and flushing to occur. */
6a47baa6 2796 ret = i915_gem_object_wait_rendering(obj);
e47c68e9 2797 if (ret != 0)
6a47baa6 2798 return ret;
e47c68e9
EA
2799 i915_gem_object_flush_gtt_write_domain(obj);
2800
2801 /* If we're already fully in the CPU read domain, we're done. */
2802 if (obj_priv->page_cpu_valid == NULL &&
2803 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
2804 return 0;
673a394b 2805
e47c68e9
EA
2806 /* Otherwise, create/clear the per-page CPU read domain flag if we're
2807 * newly adding I915_GEM_DOMAIN_CPU
2808 */
673a394b
EA
2809 if (obj_priv->page_cpu_valid == NULL) {
2810 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
2811 DRM_MEM_DRIVER);
e47c68e9
EA
2812 if (obj_priv->page_cpu_valid == NULL)
2813 return -ENOMEM;
2814 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
2815 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
673a394b
EA
2816
2817 /* Flush the cache on any pages that are still invalid from the CPU's
2818 * perspective.
2819 */
e47c68e9
EA
2820 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
2821 i++) {
673a394b
EA
2822 if (obj_priv->page_cpu_valid[i])
2823 continue;
2824
856fa198 2825 drm_clflush_pages(obj_priv->pages + i, 1);
673a394b
EA
2826
2827 obj_priv->page_cpu_valid[i] = 1;
2828 }
2829
e47c68e9
EA
2830 /* It should now be out of any other write domains, and we can update
2831 * the domain values for our changes.
2832 */
2833 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2834
2835 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2836
673a394b
EA
2837 return 0;
2838}
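As a worked example of the page range flushed above (illustrative values only): with 4 KiB pages, offset = 0x1800 and size = 0x2000 give

/*
 *	first page = 0x1800 / 0x1000                = 1
 *	last page  = (0x1800 + 0x2000 - 1) / 0x1000 = 3
 * so pages 1 through 3 are clflushed and marked in page_cpu_valid, while
 * page 0 keeps whatever validity state it already had.
 */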
2839
673a394b
EA
2840/**
2841 * Pin an object to the GTT and evaluate the relocations landing in it.
2842 */
2843static int
2844i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2845 struct drm_file *file_priv,
40a5f0de
EA
2846 struct drm_i915_gem_exec_object *entry,
2847 struct drm_i915_gem_relocation_entry *relocs)
673a394b
EA
2848{
2849 struct drm_device *dev = obj->dev;
0839ccb8 2850 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b
EA
2851 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2852 int i, ret;
0839ccb8 2853 void __iomem *reloc_page;
673a394b
EA
2854
2855 /* Choose the GTT offset for our buffer and put it there. */
2856 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
2857 if (ret)
2858 return ret;
2859
2860 entry->offset = obj_priv->gtt_offset;
2861
673a394b
EA
2862 /* Apply the relocations, using the GTT aperture to avoid cache
2863 * flushing requirements.
2864 */
2865 for (i = 0; i < entry->relocation_count; i++) {
 40a5f0de 2866 struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
673a394b
EA
2867 struct drm_gem_object *target_obj;
2868 struct drm_i915_gem_object *target_obj_priv;
3043c60c
EA
2869 uint32_t reloc_val, reloc_offset;
2870 uint32_t __iomem *reloc_entry;
673a394b 2871
673a394b 2872 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
40a5f0de 2873 reloc->target_handle);
673a394b
EA
2874 if (target_obj == NULL) {
2875 i915_gem_object_unpin(obj);
2876 return -EBADF;
2877 }
2878 target_obj_priv = target_obj->driver_private;
2879
2880 /* The target buffer should have appeared before us in the
2881 * exec_object list, so it should have a GTT space bound by now.
2882 */
2883 if (target_obj_priv->gtt_space == NULL) {
2884 DRM_ERROR("No GTT space found for object %d\n",
40a5f0de 2885 reloc->target_handle);
673a394b
EA
2886 drm_gem_object_unreference(target_obj);
2887 i915_gem_object_unpin(obj);
2888 return -EINVAL;
2889 }
2890
40a5f0de 2891 if (reloc->offset > obj->size - 4) {
673a394b
EA
2892 DRM_ERROR("Relocation beyond object bounds: "
2893 "obj %p target %d offset %d size %d.\n",
40a5f0de
EA
2894 obj, reloc->target_handle,
2895 (int) reloc->offset, (int) obj->size);
673a394b
EA
2896 drm_gem_object_unreference(target_obj);
2897 i915_gem_object_unpin(obj);
2898 return -EINVAL;
2899 }
40a5f0de 2900 if (reloc->offset & 3) {
673a394b
EA
2901 DRM_ERROR("Relocation not 4-byte aligned: "
2902 "obj %p target %d offset %d.\n",
40a5f0de
EA
2903 obj, reloc->target_handle,
2904 (int) reloc->offset);
673a394b
EA
2905 drm_gem_object_unreference(target_obj);
2906 i915_gem_object_unpin(obj);
2907 return -EINVAL;
2908 }
2909
40a5f0de
EA
2910 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
2911 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
e47c68e9
EA
2912 DRM_ERROR("reloc with read/write CPU domains: "
2913 "obj %p target %d offset %d "
2914 "read %08x write %08x",
40a5f0de
EA
2915 obj, reloc->target_handle,
2916 (int) reloc->offset,
2917 reloc->read_domains,
2918 reloc->write_domain);
491152b8
CW
2919 drm_gem_object_unreference(target_obj);
2920 i915_gem_object_unpin(obj);
e47c68e9
EA
2921 return -EINVAL;
2922 }
2923
40a5f0de
EA
2924 if (reloc->write_domain && target_obj->pending_write_domain &&
2925 reloc->write_domain != target_obj->pending_write_domain) {
673a394b
EA
2926 DRM_ERROR("Write domain conflict: "
2927 "obj %p target %d offset %d "
2928 "new %08x old %08x\n",
40a5f0de
EA
2929 obj, reloc->target_handle,
2930 (int) reloc->offset,
2931 reloc->write_domain,
673a394b
EA
2932 target_obj->pending_write_domain);
2933 drm_gem_object_unreference(target_obj);
2934 i915_gem_object_unpin(obj);
2935 return -EINVAL;
2936 }
2937
2938#if WATCH_RELOC
2939 DRM_INFO("%s: obj %p offset %08x target %d "
2940 "read %08x write %08x gtt %08x "
2941 "presumed %08x delta %08x\n",
2942 __func__,
2943 obj,
40a5f0de
EA
2944 (int) reloc->offset,
2945 (int) reloc->target_handle,
2946 (int) reloc->read_domains,
2947 (int) reloc->write_domain,
673a394b 2948 (int) target_obj_priv->gtt_offset,
40a5f0de
EA
2949 (int) reloc->presumed_offset,
2950 reloc->delta);
673a394b
EA
2951#endif
2952
40a5f0de
EA
2953 target_obj->pending_read_domains |= reloc->read_domains;
2954 target_obj->pending_write_domain |= reloc->write_domain;
673a394b
EA
2955
2956 /* If the relocation already has the right value in it, no
2957 * more work needs to be done.
2958 */
40a5f0de 2959 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
673a394b
EA
2960 drm_gem_object_unreference(target_obj);
2961 continue;
2962 }
2963
2ef7eeaa
EA
2964 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
2965 if (ret != 0) {
2966 drm_gem_object_unreference(target_obj);
2967 i915_gem_object_unpin(obj);
2968 return -EINVAL;
673a394b
EA
2969 }
2970
2971 /* Map the page containing the relocation we're going to
2972 * perform.
2973 */
40a5f0de 2974 reloc_offset = obj_priv->gtt_offset + reloc->offset;
0839ccb8
KP
2975 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
2976 (reloc_offset &
2977 ~(PAGE_SIZE - 1)));
3043c60c 2978 reloc_entry = (uint32_t __iomem *)(reloc_page +
0839ccb8 2979 (reloc_offset & (PAGE_SIZE - 1)));
40a5f0de 2980 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
673a394b
EA
2981
2982#if WATCH_BUF
2983 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
40a5f0de 2984 obj, (unsigned int) reloc->offset,
673a394b
EA
2985 readl(reloc_entry), reloc_val);
2986#endif
2987 writel(reloc_val, reloc_entry);
0839ccb8 2988 io_mapping_unmap_atomic(reloc_page);
673a394b 2989
40a5f0de
EA
2990 /* The updated presumed offset for this entry will be
2991 * copied back out to the user.
673a394b 2992 */
40a5f0de 2993 reloc->presumed_offset = target_obj_priv->gtt_offset;
673a394b
EA
2994
2995 drm_gem_object_unreference(target_obj);
2996 }
2997
673a394b
EA
2998#if WATCH_BUF
2999 if (0)
3000 i915_gem_dump_object(obj, 128, __func__, ~0);
3001#endif
3002 return 0;
3003}
3004
3005/** Dispatch a batchbuffer to the ring
3006 */
3007static int
3008i915_dispatch_gem_execbuffer(struct drm_device *dev,
3009 struct drm_i915_gem_execbuffer *exec,
201361a5 3010 struct drm_clip_rect *cliprects,
673a394b
EA
3011 uint64_t exec_offset)
3012{
3013 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b
EA
3014 int nbox = exec->num_cliprects;
3015 int i = 0, count;
3016 uint32_t exec_start, exec_len;
3017 RING_LOCALS;
3018
3019 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3020 exec_len = (uint32_t) exec->batch_len;
3021
3022 if ((exec_start | exec_len) & 0x7) {
3023 DRM_ERROR("alignment\n");
3024 return -EINVAL;
3025 }
3026
3027 if (!exec_start)
3028 return -EINVAL;
3029
3030 count = nbox ? nbox : 1;
3031
3032 for (i = 0; i < count; i++) {
3033 if (i < nbox) {
201361a5 3034 int ret = i915_emit_box(dev, cliprects, i,
673a394b
EA
3035 exec->DR1, exec->DR4);
3036 if (ret)
3037 return ret;
3038 }
3039
3040 if (IS_I830(dev) || IS_845G(dev)) {
3041 BEGIN_LP_RING(4);
3042 OUT_RING(MI_BATCH_BUFFER);
3043 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3044 OUT_RING(exec_start + exec_len - 4);
3045 OUT_RING(0);
3046 ADVANCE_LP_RING();
3047 } else {
3048 BEGIN_LP_RING(2);
3049 if (IS_I965G(dev)) {
3050 OUT_RING(MI_BATCH_BUFFER_START |
3051 (2 << 6) |
3052 MI_BATCH_NON_SECURE_I965);
3053 OUT_RING(exec_start);
3054 } else {
3055 OUT_RING(MI_BATCH_BUFFER_START |
3056 (2 << 6));
3057 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3058 }
3059 ADVANCE_LP_RING();
3060 }
3061 }
3062
3063 /* XXX breadcrumb */
3064 return 0;
3065}
3066
3067/* Throttle our rendering by waiting until the ring has completed our requests
3068 * emitted over 20 msec ago.
3069 *
3070 * This should get us reasonable parallelism between CPU and GPU but also
3071 * relatively low latency when blocking on a particular request to finish.
3072 */
3073static int
3074i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3075{
3076 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3077 int ret = 0;
3078 uint32_t seqno;
3079
3080 mutex_lock(&dev->struct_mutex);
3081 seqno = i915_file_priv->mm.last_gem_throttle_seqno;
3082 i915_file_priv->mm.last_gem_throttle_seqno =
3083 i915_file_priv->mm.last_gem_seqno;
3084 if (seqno)
3085 ret = i915_wait_request(dev, seqno);
3086 mutex_unlock(&dev->struct_mutex);
3087 return ret;
3088}
3089
40a5f0de
EA
3090static int
3091i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3092 uint32_t buffer_count,
3093 struct drm_i915_gem_relocation_entry **relocs)
3094{
3095 uint32_t reloc_count = 0, reloc_index = 0, i;
3096 int ret;
3097
3098 *relocs = NULL;
3099 for (i = 0; i < buffer_count; i++) {
3100 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3101 return -EINVAL;
3102 reloc_count += exec_list[i].relocation_count;
3103 }
3104
3105 *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER);
3106 if (*relocs == NULL)
3107 return -ENOMEM;
3108
3109 for (i = 0; i < buffer_count; i++) {
3110 struct drm_i915_gem_relocation_entry __user *user_relocs;
3111
3112 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3113
3114 ret = copy_from_user(&(*relocs)[reloc_index],
3115 user_relocs,
3116 exec_list[i].relocation_count *
3117 sizeof(**relocs));
3118 if (ret != 0) {
3119 drm_free(*relocs, reloc_count * sizeof(**relocs),
3120 DRM_MEM_DRIVER);
3121 *relocs = NULL;
2bc43b5c 3122 return -EFAULT;
40a5f0de
EA
3123 }
3124
3125 reloc_index += exec_list[i].relocation_count;
3126 }
3127
2bc43b5c 3128 return 0;
40a5f0de
EA
3129}
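The reloc_count + exec_list[i].relocation_count < reloc_count test above is the usual unsigned-overflow guard; a small self-contained sketch of the idiom (example_add_overflows is a hypothetical name):

#include <stdint.h>

/* Illustrative only: for unsigned 32-bit values, a + b wraps past
 * UINT32_MAX exactly when the sum compares less than either operand. */
static int example_add_overflows(uint32_t a, uint32_t b)
{
	return (uint32_t)(a + b) < a;
}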
3130
3131static int
3132i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
3133 uint32_t buffer_count,
3134 struct drm_i915_gem_relocation_entry *relocs)
3135{
3136 uint32_t reloc_count = 0, i;
2bc43b5c 3137 int ret = 0;
40a5f0de
EA
3138
3139 for (i = 0; i < buffer_count; i++) {
3140 struct drm_i915_gem_relocation_entry __user *user_relocs;
2bc43b5c 3141 int unwritten;
40a5f0de
EA
3142
3143 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3144
2bc43b5c
FM
3145 unwritten = copy_to_user(user_relocs,
3146 &relocs[reloc_count],
3147 exec_list[i].relocation_count *
3148 sizeof(*relocs));
3149
3150 if (unwritten) {
3151 ret = -EFAULT;
3152 goto err;
40a5f0de
EA
3153 }
3154
3155 reloc_count += exec_list[i].relocation_count;
3156 }
3157
2bc43b5c 3158err:
40a5f0de
EA
3159 drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
3160
3161 return ret;
3162}
3163
673a394b
EA
3164int
3165i915_gem_execbuffer(struct drm_device *dev, void *data,
3166 struct drm_file *file_priv)
3167{
3168 drm_i915_private_t *dev_priv = dev->dev_private;
3169 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3170 struct drm_i915_gem_execbuffer *args = data;
3171 struct drm_i915_gem_exec_object *exec_list = NULL;
3172 struct drm_gem_object **object_list = NULL;
3173 struct drm_gem_object *batch_obj;
b70d11da 3174 struct drm_i915_gem_object *obj_priv;
201361a5 3175 struct drm_clip_rect *cliprects = NULL;
40a5f0de
EA
3176 struct drm_i915_gem_relocation_entry *relocs;
3177 int ret, ret2, i, pinned = 0;
673a394b 3178 uint64_t exec_offset;
40a5f0de 3179 uint32_t seqno, flush_domains, reloc_index;
ac94a962 3180 int pin_tries;
673a394b
EA
3181
3182#if WATCH_EXEC
3183 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3184 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3185#endif
3186
4f481ed2
EA
3187 if (args->buffer_count < 1) {
3188 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3189 return -EINVAL;
3190 }
673a394b
EA
3191 /* Copy in the exec list from userland */
3192 exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
3193 DRM_MEM_DRIVER);
3194 object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
3195 DRM_MEM_DRIVER);
3196 if (exec_list == NULL || object_list == NULL) {
3197 DRM_ERROR("Failed to allocate exec or object list "
3198 "for %d buffers\n",
3199 args->buffer_count);
3200 ret = -ENOMEM;
3201 goto pre_mutex_err;
3202 }
3203 ret = copy_from_user(exec_list,
3204 (struct drm_i915_relocation_entry __user *)
3205 (uintptr_t) args->buffers_ptr,
3206 sizeof(*exec_list) * args->buffer_count);
3207 if (ret != 0) {
3208 DRM_ERROR("copy %d exec entries failed %d\n",
3209 args->buffer_count, ret);
3210 goto pre_mutex_err;
3211 }
3212
201361a5
EA
3213 if (args->num_cliprects != 0) {
3214 cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
3215 DRM_MEM_DRIVER);
 3216 if (cliprects == NULL) {
 3217 ret = -ENOMEM; goto pre_mutex_err; }
3218
3219 ret = copy_from_user(cliprects,
3220 (struct drm_clip_rect __user *)
3221 (uintptr_t) args->cliprects_ptr,
3222 sizeof(*cliprects) * args->num_cliprects);
3223 if (ret != 0) {
3224 DRM_ERROR("copy %d cliprects failed: %d\n",
3225 args->num_cliprects, ret);
3226 goto pre_mutex_err;
3227 }
3228 }
3229
40a5f0de
EA
3230 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3231 &relocs);
3232 if (ret != 0)
3233 goto pre_mutex_err;
3234
673a394b
EA
3235 mutex_lock(&dev->struct_mutex);
3236
3237 i915_verify_inactive(dev, __FILE__, __LINE__);
3238
3239 if (dev_priv->mm.wedged) {
3240 DRM_ERROR("Execbuf while wedged\n");
3241 mutex_unlock(&dev->struct_mutex);
a198bc80
CW
3242 ret = -EIO;
3243 goto pre_mutex_err;
673a394b
EA
3244 }
3245
3246 if (dev_priv->mm.suspended) {
3247 DRM_ERROR("Execbuf while VT-switched.\n");
3248 mutex_unlock(&dev->struct_mutex);
a198bc80
CW
3249 ret = -EBUSY;
3250 goto pre_mutex_err;
673a394b
EA
3251 }
3252
ac94a962 3253 /* Look up object handles */
673a394b
EA
3254 for (i = 0; i < args->buffer_count; i++) {
3255 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3256 exec_list[i].handle);
3257 if (object_list[i] == NULL) {
3258 DRM_ERROR("Invalid object handle %d at index %d\n",
3259 exec_list[i].handle, i);
3260 ret = -EBADF;
3261 goto err;
3262 }
b70d11da
KH
3263
3264 obj_priv = object_list[i]->driver_private;
3265 if (obj_priv->in_execbuffer) {
3266 DRM_ERROR("Object %p appears more than once in object list\n",
3267 object_list[i]);
3268 ret = -EBADF;
3269 goto err;
3270 }
3271 obj_priv->in_execbuffer = true;
ac94a962 3272 }
673a394b 3273
ac94a962
KP
3274 /* Pin and relocate */
3275 for (pin_tries = 0; ; pin_tries++) {
3276 ret = 0;
40a5f0de
EA
3277 reloc_index = 0;
3278
ac94a962
KP
3279 for (i = 0; i < args->buffer_count; i++) {
3280 object_list[i]->pending_read_domains = 0;
3281 object_list[i]->pending_write_domain = 0;
3282 ret = i915_gem_object_pin_and_relocate(object_list[i],
3283 file_priv,
40a5f0de
EA
3284 &exec_list[i],
3285 &relocs[reloc_index]);
ac94a962
KP
3286 if (ret)
3287 break;
3288 pinned = i + 1;
40a5f0de 3289 reloc_index += exec_list[i].relocation_count;
ac94a962
KP
3290 }
3291 /* success */
3292 if (ret == 0)
3293 break;
3294
3295 /* error other than GTT full, or we've already tried again */
3296 if (ret != -ENOMEM || pin_tries >= 1) {
f1acec93
EA
3297 if (ret != -ERESTARTSYS)
3298 DRM_ERROR("Failed to pin buffers %d\n", ret);
673a394b
EA
3299 goto err;
3300 }
ac94a962
KP
3301
3302 /* unpin all of our buffers */
3303 for (i = 0; i < pinned; i++)
3304 i915_gem_object_unpin(object_list[i]);
b1177636 3305 pinned = 0;
ac94a962
KP
3306
3307 /* evict everyone we can from the aperture */
3308 ret = i915_gem_evict_everything(dev);
3309 if (ret)
3310 goto err;
673a394b
EA
3311 }
3312
3313 /* Set the pending read domains for the batch buffer to COMMAND */
3314 batch_obj = object_list[args->buffer_count-1];
3315 batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
3316 batch_obj->pending_write_domain = 0;
3317
3318 i915_verify_inactive(dev, __FILE__, __LINE__);
3319
646f0f6e
KP
3320 /* Zero the global flush/invalidate flags. These
3321 * will be modified as new domains are computed
3322 * for each object
3323 */
3324 dev->invalidate_domains = 0;
3325 dev->flush_domains = 0;
3326
673a394b
EA
3327 for (i = 0; i < args->buffer_count; i++) {
3328 struct drm_gem_object *obj = object_list[i];
673a394b 3329
646f0f6e 3330 /* Compute new gpu domains and update invalidate/flush */
8b0e378a 3331 i915_gem_object_set_to_gpu_domain(obj);
673a394b
EA
3332 }
3333
3334 i915_verify_inactive(dev, __FILE__, __LINE__);
3335
646f0f6e
KP
3336 if (dev->invalidate_domains | dev->flush_domains) {
3337#if WATCH_EXEC
3338 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3339 __func__,
3340 dev->invalidate_domains,
3341 dev->flush_domains);
3342#endif
3343 i915_gem_flush(dev,
3344 dev->invalidate_domains,
3345 dev->flush_domains);
3346 if (dev->flush_domains)
3347 (void)i915_add_request(dev, dev->flush_domains);
3348 }
673a394b 3349
efbeed96
EA
3350 for (i = 0; i < args->buffer_count; i++) {
3351 struct drm_gem_object *obj = object_list[i];
3352
3353 obj->write_domain = obj->pending_write_domain;
3354 }
3355
673a394b
EA
3356 i915_verify_inactive(dev, __FILE__, __LINE__);
3357
3358#if WATCH_COHERENCY
3359 for (i = 0; i < args->buffer_count; i++) {
3360 i915_gem_object_check_coherency(object_list[i],
3361 exec_list[i].handle);
3362 }
3363#endif
3364
3365 exec_offset = exec_list[args->buffer_count - 1].offset;
3366
3367#if WATCH_EXEC
6911a9b8 3368 i915_gem_dump_object(batch_obj,
673a394b
EA
3369 args->batch_len,
3370 __func__,
3371 ~0);
3372#endif
3373
673a394b 3374 /* Exec the batchbuffer */
201361a5 3375 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
673a394b
EA
3376 if (ret) {
3377 DRM_ERROR("dispatch failed %d\n", ret);
3378 goto err;
3379 }
3380
3381 /*
3382 * Ensure that the commands in the batch buffer are
3383 * finished before the interrupt fires
3384 */
3385 flush_domains = i915_retire_commands(dev);
3386
3387 i915_verify_inactive(dev, __FILE__, __LINE__);
3388
3389 /*
3390 * Get a seqno representing the execution of the current buffer,
3391 * which we can wait on. We would like to mitigate these interrupts,
3392 * likely by only creating seqnos occasionally (so that we have
3393 * *some* interrupts representing completion of buffers that we can
3394 * wait on when trying to clear up gtt space).
3395 */
3396 seqno = i915_add_request(dev, flush_domains);
3397 BUG_ON(seqno == 0);
3398 i915_file_priv->mm.last_gem_seqno = seqno;
3399 for (i = 0; i < args->buffer_count; i++) {
3400 struct drm_gem_object *obj = object_list[i];
673a394b 3401
ce44b0ea 3402 i915_gem_object_move_to_active(obj, seqno);
673a394b
EA
3403#if WATCH_LRU
3404 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3405#endif
3406 }
3407#if WATCH_LRU
3408 i915_dump_lru(dev, __func__);
3409#endif
3410
3411 i915_verify_inactive(dev, __FILE__, __LINE__);
3412
673a394b 3413err:
aad87dff
JL
3414 for (i = 0; i < pinned; i++)
3415 i915_gem_object_unpin(object_list[i]);
3416
b70d11da
KH
3417 for (i = 0; i < args->buffer_count; i++) {
3418 if (object_list[i]) {
3419 obj_priv = object_list[i]->driver_private;
3420 obj_priv->in_execbuffer = false;
3421 }
aad87dff 3422 drm_gem_object_unreference(object_list[i]);
b70d11da 3423 }
673a394b 3424
673a394b
EA
3425 mutex_unlock(&dev->struct_mutex);
3426
a35f2e2b
RD
3427 if (!ret) {
3428 /* Copy the new buffer offsets back to the user's exec list. */
3429 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3430 (uintptr_t) args->buffers_ptr,
3431 exec_list,
3432 sizeof(*exec_list) * args->buffer_count);
2bc43b5c
FM
3433 if (ret) {
3434 ret = -EFAULT;
a35f2e2b
RD
3435 DRM_ERROR("failed to copy %d exec entries "
3436 "back to user (%d)\n",
3437 args->buffer_count, ret);
2bc43b5c 3438 }
a35f2e2b
RD
3439 }
3440
40a5f0de
EA
3441 /* Copy the updated relocations out regardless of current error
3442 * state. Failure to update the relocs would mean that the next
3443 * time userland calls execbuf, it would do so with presumed offset
3444 * state that didn't match the actual object state.
3445 */
3446 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3447 relocs);
3448 if (ret2 != 0) {
3449 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3450
3451 if (ret == 0)
3452 ret = ret2;
3453 }
3454
673a394b
EA
3455pre_mutex_err:
3456 drm_free(object_list, sizeof(*object_list) * args->buffer_count,
3457 DRM_MEM_DRIVER);
3458 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
3459 DRM_MEM_DRIVER);
201361a5
EA
3460 drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
3461 DRM_MEM_DRIVER);
673a394b
EA
3462
3463 return ret;
3464}
3465
3466int
3467i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3468{
3469 struct drm_device *dev = obj->dev;
3470 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3471 int ret;
3472
3473 i915_verify_inactive(dev, __FILE__, __LINE__);
3474 if (obj_priv->gtt_space == NULL) {
3475 ret = i915_gem_object_bind_to_gtt(obj, alignment);
3476 if (ret != 0) {
9bb2d6f9 3477 if (ret != -EBUSY && ret != -ERESTARTSYS)
0fce81e3 3478 DRM_ERROR("Failure to bind: %d\n", ret);
673a394b
EA
3479 return ret;
3480 }
22c344e9
CW
3481 }
3482 /*
3483 * Pre-965 chips need a fence register set up in order to
3484 * properly handle tiled surfaces.
3485 */
3486 if (!IS_I965G(dev) &&
3487 obj_priv->fence_reg == I915_FENCE_REG_NONE &&
3488 obj_priv->tiling_mode != I915_TILING_NONE) {
3489 ret = i915_gem_object_get_fence_reg(obj, true);
3490 if (ret != 0) {
3491 if (ret != -EBUSY && ret != -ERESTARTSYS)
3492 DRM_ERROR("Failure to install fence: %d\n",
3493 ret);
3494 return ret;
3495 }
673a394b
EA
3496 }
3497 obj_priv->pin_count++;
3498
3499 /* If the object is not active and not pending a flush,
3500 * remove it from the inactive list
3501 */
3502 if (obj_priv->pin_count == 1) {
3503 atomic_inc(&dev->pin_count);
3504 atomic_add(obj->size, &dev->pin_memory);
3505 if (!obj_priv->active &&
3506 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
3507 I915_GEM_DOMAIN_GTT)) == 0 &&
3508 !list_empty(&obj_priv->list))
3509 list_del_init(&obj_priv->list);
3510 }
3511 i915_verify_inactive(dev, __FILE__, __LINE__);
3512
3513 return 0;
3514}
3515
3516void
3517i915_gem_object_unpin(struct drm_gem_object *obj)
3518{
3519 struct drm_device *dev = obj->dev;
3520 drm_i915_private_t *dev_priv = dev->dev_private;
3521 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3522
3523 i915_verify_inactive(dev, __FILE__, __LINE__);
3524 obj_priv->pin_count--;
3525 BUG_ON(obj_priv->pin_count < 0);
3526 BUG_ON(obj_priv->gtt_space == NULL);
3527
3528 /* If the object is no longer pinned, and is
3529 * neither active nor being flushed, then stick it on
3530 * the inactive list
3531 */
3532 if (obj_priv->pin_count == 0) {
3533 if (!obj_priv->active &&
3534 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
3535 I915_GEM_DOMAIN_GTT)) == 0)
3536 list_move_tail(&obj_priv->list,
3537 &dev_priv->mm.inactive_list);
3538 atomic_dec(&dev->pin_count);
3539 atomic_sub(obj->size, &dev->pin_memory);
3540 }
3541 i915_verify_inactive(dev, __FILE__, __LINE__);
3542}
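/*
 * [Editor's illustration -- not part of i915_gem.c]  Typical in-kernel use of
 * the pin/unpin helpers above: pin to obtain a GTT offset that stays stable,
 * hand that offset to hardware, unpin once the hardware no longer references
 * it.  The real users are the status page and ring buffer setup later in this
 * file; this sketch assumes the declarations from i915_drv.h and does nothing
 * useful with the offset.
 */
static int example_use_pinned_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	ret = i915_gem_object_pin(obj, 4096);	/* keep resident, 4 KiB aligned */
	if (ret)
		return ret;

	/* obj_priv->gtt_offset is valid for as long as the pin is held. */
	DRM_DEBUG("object pinned at GTT offset 0x%08x\n", obj_priv->gtt_offset);

	/* ... program the offset into a hardware base register here ... */

	i915_gem_object_unpin(obj);		/* drop residency once hardware is done */
	return 0;
}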
3543
3544int
3545i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3546 struct drm_file *file_priv)
3547{
3548 struct drm_i915_gem_pin *args = data;
3549 struct drm_gem_object *obj;
3550 struct drm_i915_gem_object *obj_priv;
3551 int ret;
3552
3553 mutex_lock(&dev->struct_mutex);
3554
3555 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3556 if (obj == NULL) {
3557 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
3558 args->handle);
3559 mutex_unlock(&dev->struct_mutex);
3560 return -EBADF;
3561 }
3562 obj_priv = obj->driver_private;
3563
79e53945
JB
3564 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
3565 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3566 args->handle);
96dec61d 3567 drm_gem_object_unreference(obj);
673a394b 3568 mutex_unlock(&dev->struct_mutex);
79e53945
JB
3569 return -EINVAL;
3570 }
3571
3572 obj_priv->user_pin_count++;
3573 obj_priv->pin_filp = file_priv;
3574 if (obj_priv->user_pin_count == 1) {
3575 ret = i915_gem_object_pin(obj, args->alignment);
3576 if (ret != 0) {
3577 drm_gem_object_unreference(obj);
3578 mutex_unlock(&dev->struct_mutex);
3579 return ret;
3580 }
673a394b
EA
3581 }
3582
3583 /* XXX - flush the CPU caches for pinned objects
3584 * as the X server doesn't manage domains yet
3585 */
e47c68e9 3586 i915_gem_object_flush_cpu_write_domain(obj);
673a394b
EA
3587 args->offset = obj_priv->gtt_offset;
3588 drm_gem_object_unreference(obj);
3589 mutex_unlock(&dev->struct_mutex);
3590
3591 return 0;
3592}
3593
3594int
3595i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3596 struct drm_file *file_priv)
3597{
3598 struct drm_i915_gem_pin *args = data;
3599 struct drm_gem_object *obj;
79e53945 3600 struct drm_i915_gem_object *obj_priv;
673a394b
EA
3601
3602 mutex_lock(&dev->struct_mutex);
3603
3604 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3605 if (obj == NULL) {
3606 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
3607 args->handle);
3608 mutex_unlock(&dev->struct_mutex);
3609 return -EBADF;
3610 }
3611
79e53945
JB
3612 obj_priv = obj->driver_private;
3613 if (obj_priv->pin_filp != file_priv) {
3614 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3615 args->handle);
3616 drm_gem_object_unreference(obj);
3617 mutex_unlock(&dev->struct_mutex);
3618 return -EINVAL;
3619 }
3620 obj_priv->user_pin_count--;
3621 if (obj_priv->user_pin_count == 0) {
3622 obj_priv->pin_filp = NULL;
3623 i915_gem_object_unpin(obj);
3624 }
673a394b
EA
3625
3626 drm_gem_object_unreference(obj);
3627 mutex_unlock(&dev->struct_mutex);
3628 return 0;
3629}
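/*
 * [Editor's illustration -- not part of i915_gem.c]  How the DRM master (the X
 * server, historically) drives the two ioctls above: pin a scanout or cursor
 * buffer to learn its fixed GTT offset, and unpin it when the buffer is
 * retired.  A minimal sketch assuming DRM_IOCTL_I915_GEM_PIN / _UNPIN and
 * their argument structs from the i915_drm.h UAPI header; error handling is
 * trimmed.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"	/* include path depends on the libdrm/kernel headers in use */

static int example_pin(int drm_fd, uint32_t handle, uint64_t *gtt_offset_out)
{
	struct drm_i915_gem_pin pin;

	memset(&pin, 0, sizeof(pin));
	pin.handle = handle;
	pin.alignment = 0;			/* 0 means default (page) alignment */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_PIN, &pin))
		return -1;
	*gtt_offset_out = pin.offset;		/* filled from obj_priv->gtt_offset above */
	return 0;
}

static void example_unpin(int drm_fd, uint32_t handle)
{
	struct drm_i915_gem_unpin unpin;

	memset(&unpin, 0, sizeof(unpin));
	unpin.handle = handle;
	ioctl(drm_fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
}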
3630
3631int
3632i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3633 struct drm_file *file_priv)
3634{
3635 struct drm_i915_gem_busy *args = data;
3636 struct drm_gem_object *obj;
3637 struct drm_i915_gem_object *obj_priv;
3638
3639 mutex_lock(&dev->struct_mutex);
3640 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3641 if (obj == NULL) {
3642 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
3643 args->handle);
3644 mutex_unlock(&dev->struct_mutex);
3645 return -EBADF;
3646 }
3647
f21289b3
EA
3648 /* Update the active list for the hardware's current position.
3649 * Otherwise this only updates on a delayed timer or when irqs are
3650 * actually unmasked, and our working set ends up being larger than
3651 * required.
3652 */
3653 i915_gem_retire_requests(dev);
3654
673a394b 3655 obj_priv = obj->driver_private;
c4de0a5d
EA
3656 /* Don't count being on the flushing list against the object being
3657 * done. Otherwise, a buffer left on the flushing list but not getting
3658 * flushed (because nobody's flushing that domain) won't ever return
3659 * unbusy and get reused by libdrm's bo cache. The other expected
3660 * consumer of this interface, OpenGL's occlusion queries, also specs
3661 * that the objects get unbusy "eventually" without any interference.
3662 */
3663 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
673a394b
EA
3664
3665 drm_gem_object_unreference(obj);
3666 mutex_unlock(&dev->struct_mutex);
3667 return 0;
3668}
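/*
 * [Editor's illustration -- not part of i915_gem.c]  The busy ioctl above is
 * the primitive behind libdrm's "is this buffer still in use?" check and the
 * occlusion-query style polling mentioned in the comment.  Minimal userspace
 * sketch, assuming DRM_IOCTL_I915_GEM_BUSY and struct drm_i915_gem_busy from
 * the i915_drm.h UAPI header.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Returns 1 while the GPU still references the object, 0 once idle, -1 on error. */
static int example_bo_busy(int drm_fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy;

	memset(&busy, 0, sizeof(busy));
	busy.handle = handle;
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
		return -1;
	return busy.busy != 0;
}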
3669
3670int
3671i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3672 struct drm_file *file_priv)
3673{
3674 return i915_gem_ring_throttle(dev, file_priv);
3675}
3676
3677int i915_gem_init_object(struct drm_gem_object *obj)
3678{
3679 struct drm_i915_gem_object *obj_priv;
3680
3681 obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
3682 if (obj_priv == NULL)
3683 return -ENOMEM;
3684
3685 /*
3686 * We've just allocated pages from the kernel,
3687 * so they've just been written by the CPU with
3688 * zeros. They'll need to be clflushed before we
3689 * use them with the GPU.
3690 */
3691 obj->write_domain = I915_GEM_DOMAIN_CPU;
3692 obj->read_domains = I915_GEM_DOMAIN_CPU;
3693
ba1eb1d8
KP
3694 obj_priv->agp_type = AGP_USER_MEMORY;
3695
673a394b
EA
3696 obj->driver_private = obj_priv;
3697 obj_priv->obj = obj;
de151cf6 3698 obj_priv->fence_reg = I915_FENCE_REG_NONE;
673a394b 3699 INIT_LIST_HEAD(&obj_priv->list);
de151cf6 3700
673a394b
EA
3701 return 0;
3702}
3703
3704void i915_gem_free_object(struct drm_gem_object *obj)
3705{
de151cf6 3706 struct drm_device *dev = obj->dev;
673a394b
EA
3707 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3708
3709 while (obj_priv->pin_count > 0)
3710 i915_gem_object_unpin(obj);
3711
71acb5eb
DA
3712 if (obj_priv->phys_obj)
3713 i915_gem_detach_phys_object(dev, obj);
3714
673a394b
EA
3715 i915_gem_object_unbind(obj);
3716
ab00b3e5 3717 i915_gem_free_mmap_offset(obj);
de151cf6 3718
673a394b 3719 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
280b713b 3720 kfree(obj_priv->bit_17);
673a394b
EA
3721 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
3722}
3723
673a394b
EA
3724/** Unbinds all objects that are on the given buffer list. */
3725static int
3726i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
3727{
3728 struct drm_gem_object *obj;
3729 struct drm_i915_gem_object *obj_priv;
3730 int ret;
3731
3732 while (!list_empty(head)) {
3733 obj_priv = list_first_entry(head,
3734 struct drm_i915_gem_object,
3735 list);
3736 obj = obj_priv->obj;
3737
3738 if (obj_priv->pin_count != 0) {
3739 DRM_ERROR("Pinned object in unbind list\n");
3740 mutex_unlock(&dev->struct_mutex);
3741 return -EINVAL;
3742 }
3743
3744 ret = i915_gem_object_unbind(obj);
3745 if (ret != 0) {
3746 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
3747 ret);
3748 mutex_unlock(&dev->struct_mutex);
3749 return ret;
3750 }
3751 }
3752
3753
3754 return 0;
3755}
3756
5669fcac 3757int
673a394b
EA
3758i915_gem_idle(struct drm_device *dev)
3759{
3760 drm_i915_private_t *dev_priv = dev->dev_private;
3761 uint32_t seqno, cur_seqno, last_seqno;
3762 int stuck, ret;
3763
6dbe2772
KP
3764 mutex_lock(&dev->struct_mutex);
3765
3766 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
3767 mutex_unlock(&dev->struct_mutex);
673a394b 3768 return 0;
6dbe2772 3769 }
673a394b
EA
3770
3771 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3772 * We need to replace this with a semaphore, or something.
3773 */
3774 dev_priv->mm.suspended = 1;
3775
6dbe2772
KP
3776 /* Cancel the retire work handler, wait for it to finish if running
3777 */
3778 mutex_unlock(&dev->struct_mutex);
3779 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3780 mutex_lock(&dev->struct_mutex);
3781
673a394b
EA
3782 i915_kernel_lost_context(dev);
3783
3784 /* Flush the GPU along with all non-CPU write domains
3785 */
3786 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
3787 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
de151cf6 3788 seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
673a394b
EA
3789
3790 if (seqno == 0) {
3791 mutex_unlock(&dev->struct_mutex);
3792 return -ENOMEM;
3793 }
3794
3795 dev_priv->mm.waiting_gem_seqno = seqno;
3796 last_seqno = 0;
3797 stuck = 0;
3798 for (;;) {
3799 cur_seqno = i915_get_gem_seqno(dev);
3800 if (i915_seqno_passed(cur_seqno, seqno))
3801 break;
3802 if (last_seqno == cur_seqno) {
3803 if (stuck++ > 100) {
3804 DRM_ERROR("hardware wedged\n");
3805 dev_priv->mm.wedged = 1;
3806 DRM_WAKEUP(&dev_priv->irq_queue);
3807 break;
3808 }
3809 }
3810 msleep(10);
3811 last_seqno = cur_seqno;
3812 }
3813 dev_priv->mm.waiting_gem_seqno = 0;
3814
3815 i915_gem_retire_requests(dev);
3816
5e118f41 3817 spin_lock(&dev_priv->mm.active_list_lock);
28dfe52a
EA
3818 if (!dev_priv->mm.wedged) {
3819 /* Active and flushing should now be empty as we've
3820 * waited for a sequence higher than any pending execbuffer
3821 */
3822 WARN_ON(!list_empty(&dev_priv->mm.active_list));
3823 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
3824 /* Request should now be empty as we've also waited
3825 * for the last request in the list
3826 */
3827 WARN_ON(!list_empty(&dev_priv->mm.request_list));
3828 }
673a394b 3829
28dfe52a
EA
3830 /* Empty the active and flushing lists to inactive. If there's
3831 * anything left at this point, it means that we're wedged and
3832 * nothing good's going to happen by leaving them there. So strip
3833 * the GPU domains and just stuff them onto inactive.
673a394b 3834 */
28dfe52a
EA
3835 while (!list_empty(&dev_priv->mm.active_list)) {
3836 struct drm_i915_gem_object *obj_priv;
673a394b 3837
28dfe52a
EA
3838 obj_priv = list_first_entry(&dev_priv->mm.active_list,
3839 struct drm_i915_gem_object,
3840 list);
3841 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3842 i915_gem_object_move_to_inactive(obj_priv->obj);
3843 }
5e118f41 3844 spin_unlock(&dev_priv->mm.active_list_lock);
28dfe52a
EA
3845
3846 while (!list_empty(&dev_priv->mm.flushing_list)) {
3847 struct drm_i915_gem_object *obj_priv;
3848
151903d5 3849 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
28dfe52a
EA
3850 struct drm_i915_gem_object,
3851 list);
3852 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3853 i915_gem_object_move_to_inactive(obj_priv->obj);
3854 }
3855
3856
3857 /* Move all inactive buffers out of the GTT. */
673a394b 3858 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
28dfe52a 3859 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
6dbe2772
KP
3860 if (ret) {
3861 mutex_unlock(&dev->struct_mutex);
673a394b 3862 return ret;
6dbe2772 3863 }
673a394b 3864
6dbe2772
KP
3865 i915_gem_cleanup_ringbuffer(dev);
3866 mutex_unlock(&dev->struct_mutex);
3867
673a394b
EA
3868 return 0;
3869}
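/*
 * [Editor's illustration -- not part of i915_gem.c]  The wait loop above
 * compares 32-bit sequence numbers with i915_seqno_passed().  The usual trick
 * (and, as far as the editor knows, what i915_drv.h implements) is a signed
 * difference, which keeps the comparison correct across one wrap of the
 * counter.  Standalone worked example below; nothing driver-specific.
 */
#include <stdint.h>

/* "Has seq1 reached or passed seq2?"  Correct across a single 32-bit wrap. */
static inline int example_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

/*
 * Example: seq2 = 0xfffffff0 (issued just before the counter wrapped) and
 * seq1 = 0x00000010 (read just after).  seq1 - seq2 == 0x20, a small positive
 * value, so seq1 is correctly treated as newer despite being numerically
 * smaller.
 */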
3870
3871static int
3872i915_gem_init_hws(struct drm_device *dev)
3873{
3874 drm_i915_private_t *dev_priv = dev->dev_private;
3875 struct drm_gem_object *obj;
3876 struct drm_i915_gem_object *obj_priv;
3877 int ret;
3878
3879 /* If we need a physical address for the status page, it's already
3880 * initialized at driver load time.
3881 */
3882 if (!I915_NEED_GFX_HWS(dev))
3883 return 0;
3884
3885 obj = drm_gem_object_alloc(dev, 4096);
3886 if (obj == NULL) {
3887 DRM_ERROR("Failed to allocate status page\n");
3888 return -ENOMEM;
3889 }
3890 obj_priv = obj->driver_private;
ba1eb1d8 3891 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
673a394b
EA
3892
3893 ret = i915_gem_object_pin(obj, 4096);
3894 if (ret != 0) {
3895 drm_gem_object_unreference(obj);
3896 return ret;
3897 }
3898
3899 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
673a394b 3900
856fa198 3901 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
ba1eb1d8 3902 if (dev_priv->hw_status_page == NULL) {
673a394b
EA
3903 DRM_ERROR("Failed to map status page.\n");
3904 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3eb2ee77 3905 i915_gem_object_unpin(obj);
673a394b
EA
3906 drm_gem_object_unreference(obj);
3907 return -EINVAL;
3908 }
3909 dev_priv->hws_obj = obj;
673a394b
EA
3910 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
3911 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
ba1eb1d8 3912 I915_READ(HWS_PGA); /* posting read */
673a394b
EA
3913 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
3914
3915 return 0;
3916}
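/*
 * [Editor's illustration -- not part of i915_gem.c]  The page kmap()ed above
 * is the hardware status page: the GPU writes bookkeeping dwords (notably the
 * last completed seqno) into fixed slots and the CPU simply reads them back,
 * avoiding MMIO reads on the wait path.  Sketch of such a read, assuming the
 * i915_drv.h types; the slot index is a placeholder (the driver's real index
 * lives in i915_drv.h as I915_GEM_HWS_INDEX).
 */
static inline u32 example_read_status_page(drm_i915_private_t *dev_priv, int slot)
{
	return ((volatile u32 *)dev_priv->hw_status_page)[slot];
}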
3917
85a7bb98
CW
3918static void
3919i915_gem_cleanup_hws(struct drm_device *dev)
3920{
3921 drm_i915_private_t *dev_priv = dev->dev_private;
bab2d1f6
CW
3922 struct drm_gem_object *obj;
3923 struct drm_i915_gem_object *obj_priv;
85a7bb98
CW
3924
3925 if (dev_priv->hws_obj == NULL)
3926 return;
3927
bab2d1f6
CW
3928 obj = dev_priv->hws_obj;
3929 obj_priv = obj->driver_private;
3930
856fa198 3931 kunmap(obj_priv->pages[0]);
85a7bb98
CW
3932 i915_gem_object_unpin(obj);
3933 drm_gem_object_unreference(obj);
3934 dev_priv->hws_obj = NULL;
bab2d1f6 3935
85a7bb98
CW
3936 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3937 dev_priv->hw_status_page = NULL;
3938
3939 /* Write high address into HWS_PGA when disabling. */
3940 I915_WRITE(HWS_PGA, 0x1ffff000);
3941}
3942
79e53945 3943int
673a394b
EA
3944i915_gem_init_ringbuffer(struct drm_device *dev)
3945{
3946 drm_i915_private_t *dev_priv = dev->dev_private;
3947 struct drm_gem_object *obj;
3948 struct drm_i915_gem_object *obj_priv;
79e53945 3949 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
673a394b 3950 int ret;
50aa253d 3951 u32 head;
673a394b
EA
3952
3953 ret = i915_gem_init_hws(dev);
3954 if (ret != 0)
3955 return ret;
3956
3957 obj = drm_gem_object_alloc(dev, 128 * 1024);
3958 if (obj == NULL) {
3959 DRM_ERROR("Failed to allocate ringbuffer\n");
85a7bb98 3960 i915_gem_cleanup_hws(dev);
673a394b
EA
3961 return -ENOMEM;
3962 }
3963 obj_priv = obj->driver_private;
3964
3965 ret = i915_gem_object_pin(obj, 4096);
3966 if (ret != 0) {
3967 drm_gem_object_unreference(obj);
85a7bb98 3968 i915_gem_cleanup_hws(dev);
673a394b
EA
3969 return ret;
3970 }
3971
3972 /* Set up the kernel mapping for the ring. */
79e53945
JB
3973 ring->Size = obj->size;
3974 ring->tail_mask = obj->size - 1;
673a394b 3975
79e53945
JB
3976 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
3977 ring->map.size = obj->size;
3978 ring->map.type = 0;
3979 ring->map.flags = 0;
3980 ring->map.mtrr = 0;
673a394b 3981
79e53945
JB
3982 drm_core_ioremap_wc(&ring->map, dev);
3983 if (ring->map.handle == NULL) {
673a394b
EA
3984 DRM_ERROR("Failed to map ringbuffer.\n");
3985 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
47ed185a 3986 i915_gem_object_unpin(obj);
673a394b 3987 drm_gem_object_unreference(obj);
85a7bb98 3988 i915_gem_cleanup_hws(dev);
673a394b
EA
3989 return -EINVAL;
3990 }
79e53945
JB
3991 ring->ring_obj = obj;
3992 ring->virtual_start = ring->map.handle;
673a394b
EA
3993
3994 /* Stop the ring if it's running. */
3995 I915_WRITE(PRB0_CTL, 0);
673a394b 3996 I915_WRITE(PRB0_TAIL, 0);
50aa253d 3997 I915_WRITE(PRB0_HEAD, 0);
673a394b
EA
3998
3999 /* Initialize the ring. */
4000 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
50aa253d
KP
4001 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4002
4003 /* G45 ring initialization fails to reset head to zero */
4004 if (head != 0) {
4005 DRM_ERROR("Ring head not reset to zero "
4006 "ctl %08x head %08x tail %08x start %08x\n",
4007 I915_READ(PRB0_CTL),
4008 I915_READ(PRB0_HEAD),
4009 I915_READ(PRB0_TAIL),
4010 I915_READ(PRB0_START));
4011 I915_WRITE(PRB0_HEAD, 0);
4012
4013 DRM_ERROR("Ring head forced to zero "
4014 "ctl %08x head %08x tail %08x start %08x\n",
4015 I915_READ(PRB0_CTL),
4016 I915_READ(PRB0_HEAD),
4017 I915_READ(PRB0_TAIL),
4018 I915_READ(PRB0_START));
4019 }
4020
673a394b
EA
4021 I915_WRITE(PRB0_CTL,
4022 ((obj->size - 4096) & RING_NR_PAGES) |
4023 RING_NO_REPORT |
4024 RING_VALID);
4025
50aa253d
KP
4026 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4027
4028 /* If the head is still not zero, the ring is dead */
4029 if (head != 0) {
4030 DRM_ERROR("Ring initialization failed "
4031 "ctl %08x head %08x tail %08x start %08x\n",
4032 I915_READ(PRB0_CTL),
4033 I915_READ(PRB0_HEAD),
4034 I915_READ(PRB0_TAIL),
4035 I915_READ(PRB0_START));
4036 return -EIO;
4037 }
4038
673a394b 4039 /* Update our cache of the ring state */
79e53945
JB
4040 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4041 i915_kernel_lost_context(dev);
4042 else {
4043 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4044 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
4045 ring->space = ring->head - (ring->tail + 8);
4046 if (ring->space < 0)
4047 ring->space += ring->Size;
4048 }
673a394b
EA
4049
4050 return 0;
4051}
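/*
 * [Editor's illustration -- not part of i915_gem.c]  The head/tail/space
 * bookkeeping at the end of the function above is the standard circular-buffer
 * free-space calculation, with an 8-byte guard so the tail never catches up
 * with the head.  Standalone worked example.
 */
#include <stdint.h>

/* Free bytes between tail (where the CPU writes) and head (where the GPU reads). */
static inline int example_ring_space(uint32_t head, uint32_t tail, uint32_t size)
{
	int space = (int)head - (int)(tail + 8);

	if (space < 0)
		space += (int)size;
	return space;
}

/*
 * With a 128 KiB ring (size = 0x20000), head = 0x100 and tail = 0x1f000:
 * 0x100 - 0x1f008 = -0x1ef08, plus 0x20000 leaves 0x10f8 bytes of space.
 */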
4052
79e53945 4053void
673a394b
EA
4054i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4055{
4056 drm_i915_private_t *dev_priv = dev->dev_private;
4057
4058 if (dev_priv->ring.ring_obj == NULL)
4059 return;
4060
4061 drm_core_ioremapfree(&dev_priv->ring.map, dev);
4062
4063 i915_gem_object_unpin(dev_priv->ring.ring_obj);
4064 drm_gem_object_unreference(dev_priv->ring.ring_obj);
4065 dev_priv->ring.ring_obj = NULL;
4066 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4067
85a7bb98 4068 i915_gem_cleanup_hws(dev);
673a394b
EA
4069}
4070
4071int
4072i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4073 struct drm_file *file_priv)
4074{
4075 drm_i915_private_t *dev_priv = dev->dev_private;
4076 int ret;
4077
79e53945
JB
4078 if (drm_core_check_feature(dev, DRIVER_MODESET))
4079 return 0;
4080
673a394b
EA
4081 if (dev_priv->mm.wedged) {
4082 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4083 dev_priv->mm.wedged = 0;
4084 }
4085
673a394b 4086 mutex_lock(&dev->struct_mutex);
9bb2d6f9
EA
4087 dev_priv->mm.suspended = 0;
4088
4089 ret = i915_gem_init_ringbuffer(dev);
d816f6ac
WF
4090 if (ret != 0) {
4091 mutex_unlock(&dev->struct_mutex);
9bb2d6f9 4092 return ret;
d816f6ac 4093 }
9bb2d6f9 4094
5e118f41 4095 spin_lock(&dev_priv->mm.active_list_lock);
673a394b 4096 BUG_ON(!list_empty(&dev_priv->mm.active_list));
5e118f41
CW
4097 spin_unlock(&dev_priv->mm.active_list_lock);
4098
673a394b
EA
4099 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4100 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4101 BUG_ON(!list_empty(&dev_priv->mm.request_list));
673a394b 4102 mutex_unlock(&dev->struct_mutex);
dbb19d30
KH
4103
4104 drm_irq_install(dev);
4105
673a394b
EA
4106 return 0;
4107}
4108
4109int
4110i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4111 struct drm_file *file_priv)
4112{
4113 int ret;
4114
79e53945
JB
4115 if (drm_core_check_feature(dev, DRIVER_MODESET))
4116 return 0;
4117
673a394b 4118 ret = i915_gem_idle(dev);
dbb19d30
KH
4119 drm_irq_uninstall(dev);
4120
6dbe2772 4121 return ret;
673a394b
EA
4122}
4123
4124void
4125i915_gem_lastclose(struct drm_device *dev)
4126{
4127 int ret;
673a394b 4128
e806b495
EA
4129 if (drm_core_check_feature(dev, DRIVER_MODESET))
4130 return;
4131
6dbe2772
KP
4132 ret = i915_gem_idle(dev);
4133 if (ret)
4134 DRM_ERROR("failed to idle hardware: %d\n", ret);
673a394b
EA
4135}
4136
4137void
4138i915_gem_load(struct drm_device *dev)
4139{
4140 drm_i915_private_t *dev_priv = dev->dev_private;
4141
5e118f41 4142 spin_lock_init(&dev_priv->mm.active_list_lock);
673a394b
EA
4143 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4144 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4145 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4146 INIT_LIST_HEAD(&dev_priv->mm.request_list);
4147 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4148 i915_gem_retire_work_handler);
4149 dev_priv->mm.next_gem_seqno = 1;
4150
de151cf6
JB
4151 /* Old X drivers will take 0-2 for front, back, depth buffers */
4152 dev_priv->fence_reg_start = 3;
4153
0f973f27 4154 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
de151cf6
JB
4155 dev_priv->num_fence_regs = 16;
4156 else
4157 dev_priv->num_fence_regs = 8;
4158
673a394b
EA
4159 i915_gem_detect_bit_6_swizzle(dev);
4160}
71acb5eb
DA
4161
4162/*
4163 * Create a physically contiguous memory object for this object
4164 * e.g. for cursor + overlay regs
4165 */
4166int i915_gem_init_phys_object(struct drm_device *dev,
4167 int id, int size)
4168{
4169 drm_i915_private_t *dev_priv = dev->dev_private;
4170 struct drm_i915_gem_phys_object *phys_obj;
4171 int ret;
4172
4173 if (dev_priv->mm.phys_objs[id - 1] || !size)
4174 return 0;
4175
4176 phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
4177 if (!phys_obj)
4178 return -ENOMEM;
4179
4180 phys_obj->id = id;
4181
4182 phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
4183 if (!phys_obj->handle) {
4184 ret = -ENOMEM;
4185 goto kfree_obj;
4186 }
4187#ifdef CONFIG_X86
4188 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4189#endif
4190
4191 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4192
4193 return 0;
4194kfree_obj:
4195 drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
4196 return ret;
4197}
4198
4199void i915_gem_free_phys_object(struct drm_device *dev, int id)
4200{
4201 drm_i915_private_t *dev_priv = dev->dev_private;
4202 struct drm_i915_gem_phys_object *phys_obj;
4203
4204 if (!dev_priv->mm.phys_objs[id - 1])
4205 return;
4206
4207 phys_obj = dev_priv->mm.phys_objs[id - 1];
4208 if (phys_obj->cur_obj) {
4209 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4210 }
4211
4212#ifdef CONFIG_X86
4213 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4214#endif
4215 drm_pci_free(dev, phys_obj->handle);
4216 kfree(phys_obj);
4217 dev_priv->mm.phys_objs[id - 1] = NULL;
4218}
4219
4220void i915_gem_free_all_phys_object(struct drm_device *dev)
4221{
4222 int i;
4223
260883c8 4224 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
71acb5eb
DA
4225 i915_gem_free_phys_object(dev, i);
4226}
4227
4228void i915_gem_detach_phys_object(struct drm_device *dev,
4229 struct drm_gem_object *obj)
4230{
4231 struct drm_i915_gem_object *obj_priv;
4232 int i;
4233 int ret;
4234 int page_count;
4235
4236 obj_priv = obj->driver_private;
4237 if (!obj_priv->phys_obj)
4238 return;
4239
856fa198 4240 ret = i915_gem_object_get_pages(obj);
71acb5eb
DA
4241 if (ret)
4242 goto out;
4243
4244 page_count = obj->size / PAGE_SIZE;
4245
4246 for (i = 0; i < page_count; i++) {
856fa198 4247 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
71acb5eb
DA
4248 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4249
4250 memcpy(dst, src, PAGE_SIZE);
4251 kunmap_atomic(dst, KM_USER0);
4252 }
856fa198 4253 drm_clflush_pages(obj_priv->pages, page_count);
71acb5eb
DA
4254 drm_agp_chipset_flush(dev);
4255out:
4256 obj_priv->phys_obj->cur_obj = NULL;
4257 obj_priv->phys_obj = NULL;
4258}
4259
4260int
4261i915_gem_attach_phys_object(struct drm_device *dev,
4262 struct drm_gem_object *obj, int id)
4263{
4264 drm_i915_private_t *dev_priv = dev->dev_private;
4265 struct drm_i915_gem_object *obj_priv;
4266 int ret = 0;
4267 int page_count;
4268 int i;
4269
4270 if (id > I915_MAX_PHYS_OBJECT)
4271 return -EINVAL;
4272
4273 obj_priv = obj->driver_private;
4274
4275 if (obj_priv->phys_obj) {
4276 if (obj_priv->phys_obj->id == id)
4277 return 0;
4278 i915_gem_detach_phys_object(dev, obj);
4279 }
4280
4281
4282 /* create a new object */
4283 if (!dev_priv->mm.phys_objs[id - 1]) {
4284 ret = i915_gem_init_phys_object(dev, id,
4285 obj->size);
4286 if (ret) {
aeb565df 4287 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
71acb5eb
DA
4288 goto out;
4289 }
4290 }
4291
4292 /* bind to the object */
4293 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4294 obj_priv->phys_obj->cur_obj = obj;
4295
856fa198 4296 ret = i915_gem_object_get_pages(obj);
71acb5eb
DA
4297 if (ret) {
4298 DRM_ERROR("failed to get page list\n");
4299 goto out;
4300 }
4301
4302 page_count = obj->size / PAGE_SIZE;
4303
4304 for (i = 0; i < page_count; i++) {
856fa198 4305 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
71acb5eb
DA
4306 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4307
4308 memcpy(dst, src, PAGE_SIZE);
4309 kunmap_atomic(src, KM_USER0);
4310 }
4311
4312 return 0;
4313out:
4314 return ret;
4315}
4316
4317static int
4318i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4319 struct drm_i915_gem_pwrite *args,
4320 struct drm_file *file_priv)
4321{
4322 struct drm_i915_gem_object *obj_priv = obj->driver_private;
4323 void *obj_addr;
4324 int ret;
4325 char __user *user_data;
4326
4327 user_data = (char __user *) (uintptr_t) args->data_ptr;
4328 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4329
e08fb4f6 4330 DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
71acb5eb
DA
4331 ret = copy_from_user(obj_addr, user_data, args->size);
4332 if (ret)
4333 return -EFAULT;
4334
4335 drm_agp_chipset_flush(dev);
4336 return 0;
4337}
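/*
 * [Editor's illustration -- not part of i915_gem.c]  From userspace, an object
 * backed by a phys object (a cursor, say) is still written through the
 * ordinary GEM pwrite ioctl; the function above is only the kernel-side branch
 * that lands the bytes in the contiguous PCI allocation.  Minimal sketch,
 * assuming DRM_IOCTL_I915_GEM_PWRITE and struct drm_i915_gem_pwrite from the
 * i915_drm.h UAPI header.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static int example_pwrite(int drm_fd, uint32_t handle,
			  const void *data, uint64_t size, uint64_t offset)
{
	struct drm_i915_gem_pwrite pwrite;

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = handle;
	pwrite.offset = offset;			/* byte offset within the object */
	pwrite.size = size;
	pwrite.data_ptr = (uintptr_t)data;	/* user pointer, as read via args->data_ptr above */
	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}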