/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>

#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);

int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}
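
/*
 * Illustrative (not from the original source): both bounds must be
 * page-aligned, so a hypothetical caller managing a 256MiB aperture that
 * reserves the first megabyte would pass e.g.
 *
 *	i915_gem_do_init(dev, 1 * 1024 * 1024, 256 * 1024 * 1024);
 *
 * Any start/end not aligned to PAGE_SIZE is rejected with -EINVAL above.
 */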

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int handle, ret;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}
136
eb01459f
EA
137static inline int
138fast_shmem_read(struct page **pages,
139 loff_t page_base, int page_offset,
140 char __user *data,
141 int length)
142{
143 char __iomem *vaddr;
2bc43b5c 144 int unwritten;
eb01459f
EA
145
146 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
147 if (vaddr == NULL)
148 return -ENOMEM;
2bc43b5c 149 unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
eb01459f
EA
150 kunmap_atomic(vaddr, KM_USER0);
151
2bc43b5c
FM
152 if (unwritten)
153 return -EFAULT;
154
155 return 0;
eb01459f
EA
156}
157
280b713b
EA
158static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
159{
160 drm_i915_private_t *dev_priv = obj->dev->dev_private;
161 struct drm_i915_gem_object *obj_priv = obj->driver_private;
162
163 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
164 obj_priv->tiling_mode != I915_TILING_NONE;
165}
166
40123c1f
EA
167static inline int
168slow_shmem_copy(struct page *dst_page,
169 int dst_offset,
170 struct page *src_page,
171 int src_offset,
172 int length)
173{
174 char *dst_vaddr, *src_vaddr;
175
176 dst_vaddr = kmap_atomic(dst_page, KM_USER0);
177 if (dst_vaddr == NULL)
178 return -ENOMEM;
179
180 src_vaddr = kmap_atomic(src_page, KM_USER1);
181 if (src_vaddr == NULL) {
182 kunmap_atomic(dst_vaddr, KM_USER0);
183 return -ENOMEM;
184 }
185
186 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
187
188 kunmap_atomic(src_vaddr, KM_USER1);
189 kunmap_atomic(dst_vaddr, KM_USER0);
190
191 return 0;
192}

static inline int
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
	if (gpu_vaddr == NULL)
		return -ENOMEM;

	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
	if (cpu_vaddr == NULL) {
		kunmap_atomic(gpu_vaddr, KM_USER0);
		return -ENOMEM;
	}

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap_atomic(cpu_vaddr, KM_USER1);
	kunmap_atomic(gpu_vaddr, KM_USER0);

	return 0;
}
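
/*
 * Illustrative worked example (not from the original source): XORing bit 6
 * of the page offset swaps each 64-byte cache line with its neighbour, so
 * on an affected page bytes 0..63 are found at 64..127 and vice versa,
 * bytes 128..191 at 192..255, and so on.  A copy starting at gpu_offset
 * 100 is first clamped to the cache line end (this_length = 28) and reads
 * from swizzled offset 100 ^ 64 = 36.
 */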

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
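
/*
 * Illustrative (not from the original source): the masks above split a
 * linear offset into a page-aligned base and an intra-page remainder.
 * With PAGE_SIZE = 4096 and offset = 0x12345:
 *
 *	page_base   = 0x12345 & ~0xFFF = 0x12000
 *	page_offset = 0x12345 &  0xFFF = 0x345
 *	page_length = min(remain, 0x1000 - 0x345)
 *
 * so each loop iteration copies at most up to the next page boundary.
 */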

/**
 * This is the fallback shmem pread path, which uses get_user_pages to pin
 * the user pages up front, so we can copy out of the object's backing pages
 * while holding struct_mutex without taking page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    1);
		} else {
			ret = slow_shmem_copy(user_pages[data_page_index],
					      data_page_offset,
					      obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}
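
/*
 * Illustrative (not from the original source): page_length is clamped
 * against both the source and destination page boundaries, whichever comes
 * first.  With remain = 0x1000, shmem_page_offset = 0xE00 and
 * data_page_offset = 0x300, the first clamp reduces page_length to 0x200
 * (the end of the shmem page); the second clamp then leaves it unchanged,
 * since 0x300 + 0x200 still fits within the user page.
 */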

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

	drm_gem_object_unreference(obj);

	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* This is the slow write path: the user pages have already been pinned
 * with get_user_pages, so the copy cannot fault while struct_mutex is held.
 */

static inline int
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char *src_vaddr, *dst_vaddr;
	unsigned long unwritten;

	dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
	src_vaddr = kmap_atomic(user_page, KM_USER1);
	unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
						      src_vaddr + user_offset,
						      length);
	kunmap_atomic(src_vaddr, KM_USER1);
	io_mapping_unmap_atomic(dst_vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}

static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				      page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
					gtt_page_base, gtt_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);

		/* The source pages are pinned, so this shouldn't fault;
		 * bail out with the error if anything does go wrong.
		 */
		if (ret)
			goto out_unpin_object;

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    0);
		} else {
			ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference(obj);

	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
		return -EINVAL;

	if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
#endif
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %d)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it
 * doesn't imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
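
/*
 * Illustrative userspace usage (an assumption, not part of this file):
 * a client fills in struct drm_i915_gem_mmap with the object handle,
 * offset 0 and the object size, issues DRM_IOCTL_I915_GEM_MMAP, and then
 * reads or writes through the CPU pointer returned in args->addr_ptr.
 */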

/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}
		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	/* Need a new fence register? */
	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
	    obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj, write);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}
	}

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	case -EFAULT:
	case -EINVAL:
		return VM_FAULT_SIGBUS;
	default:
		return VM_FAULT_NOPAGE;
	}
}

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = drm_calloc(1, sizeof(struct drm_map_list),
			       DRM_MEM_DRIVER);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);

	return ret;
}
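
/*
 * Illustrative (not from the original source): the fake offset handed to
 * userspace is simply the offset-manager block start scaled to bytes.  If
 * drm_mm hands back a block starting at page 0x10, the object's mmap
 * offset becomes 0x10 << PAGE_SHIFT = 0x10000, and a later mmap(2) on the
 * DRM fd at that offset resolves back to this object.
 */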

static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	if (list->map) {
		/* Match the allocation size used in
		 * i915_gem_create_mmap_offset().
		 */
		drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
		list->map = NULL;
	}

	obj_priv->mmap_offset = 0;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (IS_I9XX(dev))
		start = 1024*1024;
	else
		start = 512*1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}
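
/*
 * Illustrative (not from the original source): the loop doubles the fence
 * size until it covers the object.  A 1.5MiB tiled object on an i9xx part
 * starts at 1MiB, which is too small, so the alignment returned is 2MiB;
 * a 600KiB tiled object fits in the initial 1MiB and gets 1MiB alignment.
 */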

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);

	obj_priv = obj->driver_private;

	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	args->offset = obj_priv->mmap_offset;

	obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);

	/* Make sure the alignment is correct for fence regs etc */
	if (obj_priv->agp_mem &&
	    (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count = obj->size / PAGE_SIZE;
	int i;

	BUG_ON(obj_priv->pages_refcount == 0);

	if (--obj_priv->pages_refcount != 0)
		return;

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_save_bit_17_swizzle(obj);

	for (i = 0; i < page_count; i++)
		if (obj_priv->pages[i] != NULL) {
			if (obj_priv->dirty)
				set_page_dirty(obj_priv->pages[i]);
			mark_page_accessed(obj_priv->pages[i]);
			page_cache_release(obj_priv->pages[i]);
		}
	obj_priv->dirty = 0;

	drm_free_large(obj_priv->pages);
	obj_priv->pages = NULL;
}

static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	spin_lock(&dev_priv->mm.active_list_lock);
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);
	obj_priv->last_rendering_seqno = seqno;
}

static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
	obj_priv->last_rendering_seqno = 0;
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	obj_priv->last_rendering_seqno = 0;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_mutex held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;
	RING_LOCALS;

	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
	if (request == NULL)
		return 0;

	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);

	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);

	/* Associate any objects on the flushing list matching the write
	 * domain we're flushing with our flush.
	 */
	if (flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		list_for_each_entry_safe(obj_priv, next,
					 &dev_priv->mm.flushing_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if ((obj->write_domain & flush_domains) ==
			    obj->write_domain) {
				obj->write_domain = 0;
				i915_gem_object_move_to_active(obj, seqno);
			}
		}
	}

	if (was_empty && !dev_priv->mm.suspended)
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	return seqno;
}
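
/*
 * Illustrative (not from the original source): seqno 0 is reserved as the
 * "no seqno" value, so the bump above skips it on wraparound.  If
 * next_gem_seqno is 0xffffffff, this request is assigned 0xffffffff, the
 * counter steps to 0 and is immediately bumped again, so the following
 * request gets seqno 1.
 */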

/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}

/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	spin_lock(&dev_priv->mm.active_list_lock);
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this seqno.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			goto out;

#if WATCH_LRU
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);
#endif

		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else {
			/* Take a reference on the object so it won't be
			 * freed while the spinlock is held.  The list
			 * protection for this spinlock is safe when breaking
			 * the lock like this since the next thing we do
			 * is just get the head of the list again.
			 */
			drm_gem_object_reference(obj);
			i915_gem_object_move_to_inactive(obj);
			spin_unlock(&dev_priv->mm.active_list_lock);
			drm_gem_object_unreference(obj);
			spin_lock(&dev_priv->mm.active_list_lock);
		}
	}
out:
	spin_unlock(&dev_priv->mm.active_list_lock);
}

/**
 * Returns true if seq1 is later than or equal to seq2, using wrap-safe
 * signed arithmetic on the 32-bit difference.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
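
/*
 * Illustrative (not from the original source): the signed difference keeps
 * the comparison correct across wraparound.  With seq1 = 0x00000002 and
 * seq2 = 0xfffffffd, seq1 - seq2 = 5, so (int32_t)5 >= 0 and seq1 is
 * treated as later even though it is numerically smaller.
 */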

uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	if (!dev_priv->hw_status_page)
		return;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    dev_priv->mm.wedged) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
		} else
			break;
	}
}

void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!dev_priv->mm.suspended &&
	    !list_empty(&dev_priv->mm.request_list))
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 ier;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		ier = I915_READ(IER);
		if (!ier) {
			DRM_ERROR("something (likely vbetool) disabled "
				  "interrupts, re-enabling\n");
			i915_driver_irq_preinstall(dev);
			i915_driver_irq_postinstall(dev);
		}

		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_get(dev);
		ret = wait_event_interruptible(dev_priv->irq_queue,
					       i915_seqno_passed(i915_get_gem_seqno(dev),
								 seqno) ||
					       dev_priv->mm.wedged);
		i915_user_irq_put(dev);
		dev_priv->mm.waiting_gem_seqno = 0;
	}
	if (dev_priv->mm.wedged)
		ret = -EIO;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, i915_get_gem_seqno(dev));

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests(dev);

	return ret;
}

static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd;
	RING_LOCALS;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
						     I915_GEM_DOMAIN_GTT)) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		BEGIN_LP_RING(2);
		OUT_RING(cmd);
		OUT_RING(0); /* noop */
		ADVANCE_LP_RING();
	}
}
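
/*
 * Illustrative (not from the original source): flushing render writes on a
 * pre-965 part with invalidate_domains = I915_GEM_DOMAIN_RENDER |
 * I915_GEM_DOMAIN_SAMPLER builds cmd as MI_FLUSH with MI_NO_WRITE_FLUSH
 * cleared (so render caches are written back) and MI_READ_FLUSH set (so
 * the sampler cache is invalidated) before the command is emitted.
 */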
1804
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
#if WATCH_BUF
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);
#endif
		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	loff_t offset;
	int ret = 0;

#if WATCH_BUF
	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
#endif
	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it.  This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("set_domain failed: %d\n", ret);
		return ret;
	}

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	BUG_ON(obj_priv->active);

	/* blow away mappings if mapped through GTT */
	offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);

	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
		i915_gem_clear_fence_reg(obj);

	i915_gem_object_put_pages(obj);

	if (obj_priv->gtt_space) {
		atomic_dec(&dev->gtt_count);
		atomic_sub(obj->size, &dev->gtt_memory);

		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
	}

	/* Remove ourselves from the LRU list if present. */
	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);

	return 0;
}

static int
i915_gem_evict_something(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	for (;;) {
		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		if (!list_empty(&dev_priv->mm.inactive_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;
			BUG_ON(obj_priv->pin_count != 0);
#if WATCH_LRU
			DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
			BUG_ON(obj_priv->active);

			/* Wait on the rendering and unbind the buffer. */
			ret = i915_gem_object_unbind(obj);
			break;
		}

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for one of those things to finish and hopefully
		 * leave us a buffer to evict.
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&dev_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev, request->seqno);
			if (ret)
				break;

			/* If waiting caused an object to become inactive,
			 * then loop around and grab it.  Otherwise, we
			 * assume that waiting freed and unbound something,
			 * so there should now be some space in the GTT.
			 */
			if (!list_empty(&dev_priv->mm.inactive_list))
				continue;
			break;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;

			i915_gem_flush(dev,
				       obj->write_domain,
				       obj->write_domain);
			i915_add_request(dev, obj->write_domain);

			obj = NULL;
			continue;
		}

		DRM_ERROR("inactive empty %d request empty %d "
			  "flushing empty %d\n",
			  list_empty(&dev_priv->mm.inactive_list),
			  list_empty(&dev_priv->mm.request_list),
			  list_empty(&dev_priv->mm.flushing_list));
		/* If we didn't do any of the above, there's nothing to be done
		 * and we just can't fit it in.
		 */
		return -ENOMEM;
	}
	return ret;
}

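/*
 * Usage sketch (illustrative): callers that run out of aperture space
 * evict and retry, as i915_gem_object_bind_to_gtt() does below:
 *
 *	ret = i915_gem_evict_something(dev);
 *	if (ret != 0)
 *		return ret;
 *	goto search_free;
 */
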
static int
i915_gem_evict_everything(struct drm_device *dev)
{
	int ret;

	for (;;) {
		ret = i915_gem_evict_something(dev);
		if (ret != 0)
			break;
	}
	if (ret == -ENOMEM)
		return 0;
	return ret;
}

int
i915_gem_object_get_pages(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	int ret;

	if (obj_priv->pages_refcount++ != 0)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->pages != NULL);
	obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
	if (obj_priv->pages == NULL) {
		DRM_ERROR("Failed to allocate page list\n");
		obj_priv->pages_refcount--;
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_mapping_page(mapping, i, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			DRM_ERROR("read_mapping_page failed: %d\n", ret);
			i915_gem_object_put_pages(obj);
			return ret;
		}
		obj_priv->pages[i] = page;
	}

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;
}

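/*
 * Note (usage sketch): page references are counted, so get/put must be
 * balanced.  A caller needing the backing pages temporarily would do:
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret != 0)
 *		return ret;
 *	...access obj_priv->pages[0 .. size/PAGE_SIZE - 1]...
 *	i915_gem_object_put_pages(obj);
 */
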
static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
			 0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
}

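/*
 * Worked example (illustrative): the 965 fence value packs the last page
 * of the object (gtt_offset + size - 4096) in the upper half, and the
 * start page (gtt_offset), the pitch in 128-byte units minus one, an
 * optional Y-tiling bit, and the valid bit in the lower half.  An object
 * at GTT offset 0x100000 of size 0x10000 with a 512-byte stride thus gets
 * end page 0x10f000, start page 0x100000, and a pitch field of
 * (512 / 128) - 1 = 3.
 */
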
static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	int tile_width;
	uint32_t fence_reg, val;
	uint32_t pitch_val;

	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
		     __func__, obj_priv->gtt_offset, obj->size);
		return;
	}

	if (obj_priv->tiling_mode == I915_TILING_Y &&
	    HAS_128_BYTE_Y_TILING(dev))
		tile_width = 128;
	else
		tile_width = 512;

	/* Note: pitch better be a power of two tile widths */
	pitch_val = obj_priv->stride / tile_width;
	pitch_val = ffs(pitch_val) - 1;

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I915_FENCE_SIZE_BITS(obj->size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	if (regnum < 8)
		fence_reg = FENCE_REG_830_0 + (regnum * 4);
	else
		fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
	I915_WRITE(fence_reg, val);
}

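/*
 * Worked example (illustrative): the pre-965 pitch field is log2 of the
 * stride in tile widths.  An X-tiled object (tile_width = 512) with a
 * 2048-byte stride gives pitch_val = ffs(2048 / 512) - 1 = ffs(4) - 1 = 2,
 * i.e. 2^2 = 4 tiles per row.
 */
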
static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	uint32_t val;
	uint32_t pitch_val;
	uint32_t fence_size_bits;

	if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
		     __func__, obj_priv->gtt_offset);
		return;
	}

	pitch_val = (obj_priv->stride / 128) - 1;
	WARN_ON(pitch_val & ~0x0000000f);

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
	WARN_ON(fence_size_bits & ~0x00000f00);
	val |= fence_size_bits;
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}

/**
 * i915_gem_object_get_fence_reg - set up a fence reg for an object
 * @obj: object to map through a fence reg
 * @write: object is about to be written
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
static int
i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_i915_fence_reg *reg = NULL;
	struct drm_i915_gem_object *old_obj_priv = NULL;
	int i, ret, avail;

	switch (obj_priv->tiling_mode) {
	case I915_TILING_NONE:
		WARN(1, "allocating a fence for non-tiled object?\n");
		break;
	case I915_TILING_X:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (512 - 1)),
		     "object 0x%08x is X tiled but has non-512B pitch\n",
		     obj_priv->gtt_offset);
		break;
	case I915_TILING_Y:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (128 - 1)),
		     "object 0x%08x is Y tiled but has non-128B pitch\n",
		     obj_priv->gtt_offset);
		break;
	}

	/* First try to find a free reg */
try_again:
	avail = 0;
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			break;

		old_obj_priv = reg->obj->driver_private;
		if (!old_obj_priv->pin_count)
			avail++;
	}

	/* None available, try to steal one or wait for a user to finish */
	if (i == dev_priv->num_fence_regs) {
		uint32_t seqno = dev_priv->mm.next_gem_seqno;
		loff_t offset;

		if (avail == 0)
			return -ENOMEM;

		for (i = dev_priv->fence_reg_start;
		     i < dev_priv->num_fence_regs; i++) {
			uint32_t this_seqno;

			reg = &dev_priv->fence_regs[i];
			old_obj_priv = reg->obj->driver_private;

			if (old_obj_priv->pin_count)
				continue;

			/* i915 uses fences for GPU access to tiled buffers */
			if (IS_I965G(dev) || !old_obj_priv->active)
				break;

			/* find the seqno of the first available fence */
			this_seqno = old_obj_priv->last_rendering_seqno;
			if (this_seqno != 0 &&
			    reg->obj->write_domain == 0 &&
			    i915_seqno_passed(seqno, this_seqno))
				seqno = this_seqno;
		}

		/*
		 * Now things get ugly... we have to wait for one of the
		 * objects to finish before trying again.
		 */
		if (i == dev_priv->num_fence_regs) {
			if (seqno == dev_priv->mm.next_gem_seqno) {
				i915_gem_flush(dev,
					       I915_GEM_GPU_DOMAINS,
					       I915_GEM_GPU_DOMAINS);
				seqno = i915_add_request(dev,
							 I915_GEM_GPU_DOMAINS);
				if (seqno == 0)
					return -ENOMEM;
			}

			ret = i915_wait_request(dev, seqno);
			if (ret)
				return ret;
			goto try_again;
		}

		BUG_ON(old_obj_priv->active ||
		       (reg->obj->write_domain & I915_GEM_GPU_DOMAINS));

		/*
		 * Zap this virtual mapping so we can set up a fence again
		 * for this object next time we need it.
		 */
		offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
		if (dev->dev_mapping)
			unmap_mapping_range(dev->dev_mapping, offset,
					    reg->obj->size, 1);
		old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
	}

	obj_priv->fence_reg = i;
	reg->obj = obj;

	if (IS_I965G(dev))
		i965_write_fence_reg(reg);
	else if (IS_I9XX(dev))
		i915_write_fence_reg(reg);
	else
		i830_write_fence_reg(reg);

	return 0;
}

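/*
 * Usage sketch (illustrative): pre-965 chips need a fence for GPU access
 * to tiled buffers, so i915_gem_object_pin() below installs one when
 * pinning a tiled object that doesn't have one yet:
 *
 *	if (!IS_I965G(dev) &&
 *	    obj_priv->fence_reg == I915_FENCE_REG_NONE &&
 *	    obj_priv->tiling_mode != I915_TILING_NONE)
 *		ret = i915_gem_object_get_fence_reg(obj, true);
 */
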
/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @obj: object to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
static void
i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (IS_I965G(dev))
		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
	else {
		uint32_t fence_reg;

		if (obj_priv->fence_reg < 8)
			fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
		else
			fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
						       8) * 4;

		I915_WRITE(fence_reg, 0);
	}

	dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
}

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_mm_node *free_space;
	int page_count, ret;

	if (dev_priv->mm.suspended)
		return -EBUSY;
	if (alignment == 0)
		alignment = i915_gem_get_gtt_alignment(obj);
	if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

 search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
		if (obj_priv->gtt_space != NULL) {
			obj_priv->gtt_space->private = obj;
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
		}
	}
	if (obj_priv->gtt_space == NULL) {
		bool lists_empty;

		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
#if WATCH_LRU
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
		spin_lock(&dev_priv->mm.active_list_lock);
		lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
			       list_empty(&dev_priv->mm.flushing_list) &&
			       list_empty(&dev_priv->mm.active_list));
		spin_unlock(&dev_priv->mm.active_list_lock);
		if (lists_empty) {
			DRM_ERROR("GTT full, but LRU list empty\n");
			return -ENOMEM;
		}

		ret = i915_gem_evict_something(dev);
		if (ret != 0) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to evict a buffer %d\n", ret);
			return ret;
		}
		goto search_free;
	}

#if WATCH_BUF
	DRM_INFO("Binding object of size %d at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);
#endif
	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return ret;
	}

	page_count = obj->size / PAGE_SIZE;
	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->pages,
					       page_count,
					       obj_priv->gtt_offset,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_put_pages(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return -ENOMEM;
	}
	atomic_inc(&dev->gtt_count);
	atomic_add(obj->size, &dev->gtt_memory);

	/* Assert that the object is not currently in any GPU domain.  As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));

	return 0;
}

void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->pages == NULL)
		return;

	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}

/** Flushes any GPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t seqno;

	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return;

	/* Queue the GPU write cache flushing we need. */
	i915_gem_flush(dev, 0, obj->write_domain);
	seqno = i915_add_request(dev, obj->write_domain);
	obj->write_domain = 0;
	i915_gem_object_move_to_active(obj, seqno);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in the render cache.
	 */
	obj->write_domain = 0;
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	drm_agp_chipset_flush(dev);
	obj->write_domain = 0;
}

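/*
 * Pattern sketch (illustrative): the set_to_*_domain functions below all
 * begin with the same two steps before touching any domain flags:
 *
 *	i915_gem_object_flush_gpu_write_domain(obj);
 *	ret = i915_gem_object_wait_rendering(obj);
 *	if (ret != 0)
 *		return ret;
 *
 * i.e. queue any needed GPU flush first, then wait for the object to fall
 * off the active list.
 */
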
/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj_priv->gtt_space == NULL)
		return -EINVAL;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	/* If we're writing through the GTT domain, then CPU and GPU caches
	 * will need to be invalidated at next use.
	 */
	if (write)
		obj->read_domains &= I915_GEM_DOMAIN_GTT;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj_priv->dirty = 1;
	}

	return 0;
}

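/*
 * Usage sketch (illustrative): relocations are patched through the GTT
 * aperture, so i915_gem_object_pin_and_relocate() below moves the buffer
 * to the GTT write domain before poking it:
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 *	if (ret != 0)
 *		return ret;
 *	writel(reloc_val, reloc_entry);
 */
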
/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
	int ret;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we have a partially-valid cache of the object in the CPU,
	 * finish invalidating it and free the per-page flags.
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);

		obj->read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->read_domains &= I915_GEM_DOMAIN_CPU;
		obj->write_domain = I915_GEM_DOMAIN_CPU;
	}

	return 0;
}

/*
 * Set the next domain for the specified object.  This may not actually
 * perform the necessary flushing/invalidating though, as that may want
 * to be batched with other set_domain operations.
 *
 * This is (we hope) the only really tricky part of gem.  The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent.  A few concrete examples may
 * help to explain how it works.  For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate a
 * pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped to GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read/written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t invalidate_domains = 0;
	uint32_t flush_domains = 0;

	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);

#if WATCH_BUF
	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
		 __func__, obj,
		 obj->read_domains, obj->pending_read_domains,
		 obj->write_domain, obj->pending_write_domain);
#endif
	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (obj->pending_write_domain == 0)
		obj->pending_read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match.  Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain &&
	    obj->write_domain != obj->pending_read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |=
			obj->pending_read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data.  That is, any new read domains.
	 */
	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
#if WATCH_BUF
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
#endif
		i915_gem_clflush_object(obj);
	}

	/* The actual obj->write_domain will be updated with
	 * pending_write_domain after we emit the accumulated flush for all
	 * of our domain changes in execbuffers (which clears objects'
	 * write_domains).  So if we have a current write domain that we
	 * aren't changing, set pending_write_domain to that.
	 */
	if (flush_domains == 0 && obj->pending_write_domain == 0)
		obj->pending_write_domain = obj->write_domain;
	obj->read_domains = obj->pending_read_domains;

	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;
#if WATCH_BUF
	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
		 __func__,
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);
#endif
}

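/*
 * Worked example (illustrative), matching Case 3 step 3 above: an object
 * in (CPU, CPU) whose execbuffer entry requests RENDER reads merges its
 * old read domains into pending_read_domains (giving CPU+RENDER), takes
 * the "write domain differs from new read domains" branch so
 * flush_domains |= CPU and invalidate_domains |= RENDER, clflushes because
 * CPU is in the combined mask, and accumulates both masks into
 * dev->flush_domains / dev->invalidate_domains for execbuffer's single
 * combined i915_gem_flush().
 */
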
/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (!obj_priv->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
			if (obj_priv->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj_priv->pages + i, 1);
		}
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
		 DRM_MEM_DRIVER);
	obj_priv->page_cpu_valid = NULL;
}

/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid.  The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
					  uint64_t offset, uint64_t size)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;

	if (offset == 0 && size == obj->size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj_priv->page_cpu_valid == NULL &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
						      DRM_MEM_DRIVER);
		if (obj_priv->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->pages + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	obj->read_domains |= I915_GEM_DOMAIN_CPU;

	return 0;
}

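/*
 * Worked example (illustrative): for a 16KB object, asking for bytes
 * [4096, 12288) clflushes only pages 1 and 2 and marks them valid:
 * offset / PAGE_SIZE = 1 and (offset + size - 1) / PAGE_SIZE = 2 bound the
 * loop above (assuming PAGE_SIZE is 4096).
 */
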
/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
				 struct drm_i915_gem_exec_object *entry,
				 struct drm_i915_gem_relocation_entry *relocs)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	void __iomem *reloc_page;

	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;

	entry->offset = obj_priv->gtt_offset;

	/* Apply the relocations, using the GTT aperture to avoid cache
	 * flushing requirements.
	 */
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
		struct drm_gem_object *target_obj;
		struct drm_i915_gem_object *target_obj_priv;
		uint32_t reloc_val, reloc_offset;
		uint32_t __iomem *reloc_entry;

		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc->target_handle);
		if (target_obj == NULL) {
			i915_gem_object_unpin(obj);
			return -EBADF;
		}
		target_obj_priv = target_obj->driver_private;

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc->target_handle);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc->offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc->offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
		    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
			DRM_ERROR("reloc with read/write CPU domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc->target_handle,
				  (int) reloc->offset,
				  reloc->read_domains,
				  reloc->write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc->write_domain && target_obj->pending_write_domain &&
		    reloc->write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset,
				  reloc->write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc->offset,
			 (int) reloc->target_handle,
			 (int) reloc->read_domains,
			 (int) reloc->write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc->presumed_offset,
			 reloc->delta);
#endif

		target_obj->pending_read_domains |= reloc->read_domains;
		target_obj->pending_write_domain |= reloc->write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* Map the page containing the relocation we're going to
		 * perform.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc->offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      (reloc_offset &
						       ~(PAGE_SIZE - 1)));
		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc->delta;

#if WATCH_BUF
		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc->offset,
			 readl(reloc_entry), reloc_val);
#endif
		writel(reloc_val, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);

		/* The updated presumed offset for this entry will be
		 * copied back out to the user.
		 */
		reloc->presumed_offset = target_obj_priv->gtt_offset;

		drm_gem_object_unreference(target_obj);
	}

#if WATCH_BUF
	if (0)
		i915_gem_dump_object(obj, 128, __func__, ~0);
#endif
	return 0;
}

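/*
 * Worked example (illustrative): a relocation with delta 0x40 whose target
 * is now bound at GTT offset 0x200000 gets reloc_val = 0x200040 written
 * into the batch at reloc->offset, and presumed_offset is updated to
 * 0x200000 so that if the target stays put, the next execbuffer can skip
 * the write entirely.
 */
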
/** Dispatch a batchbuffer to the ring
 */
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct drm_i915_gem_execbuffer *exec,
			     struct drm_clip_rect *cliprects,
			     uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	RING_LOCALS;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	if (!exec_start)
		return -EINVAL;

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			OUT_RING(exec_start + exec_len - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6) |
					 MI_BATCH_NON_SECURE_I965);
				OUT_RING(exec_start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6));
				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	/* XXX breadcrumb */
	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
	uint32_t seqno;

	mutex_lock(&dev->struct_mutex);
	seqno = i915_file_priv->mm.last_gem_throttle_seqno;
	i915_file_priv->mm.last_gem_throttle_seqno =
		i915_file_priv->mm.last_gem_seqno;
	if (seqno)
		ret = i915_wait_request(dev, seqno);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static int
i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
			      uint32_t buffer_count,
			      struct drm_i915_gem_relocation_entry **relocs)
{
	uint32_t reloc_count = 0, reloc_index = 0, i;
	int ret;

	*relocs = NULL;
	for (i = 0; i < buffer_count; i++) {
		/* Guard against integer overflow of the total reloc count. */
		if (reloc_count + exec_list[i].relocation_count < reloc_count)
			return -EINVAL;
		reloc_count += exec_list[i].relocation_count;
	}

	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
	if (*relocs == NULL)
		return -ENOMEM;

	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;

		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;

		ret = copy_from_user(&(*relocs)[reloc_index],
				     user_relocs,
				     exec_list[i].relocation_count *
				     sizeof(**relocs));
		if (ret != 0) {
			drm_free_large(*relocs);
			*relocs = NULL;
			return -EFAULT;
		}

		reloc_index += exec_list[i].relocation_count;
	}

	return 0;
}

static int
i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
			    uint32_t buffer_count,
			    struct drm_i915_gem_relocation_entry *relocs)
{
	uint32_t reloc_count = 0, i;
	int ret = 0;

	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		int unwritten;

		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;

		unwritten = copy_to_user(user_relocs,
					 &relocs[reloc_count],
					 exec_list[i].relocation_count *
					 sizeof(*relocs));

		if (unwritten) {
			ret = -EFAULT;
			goto err;
		}

		reloc_count += exec_list[i].relocation_count;
	}

err:
	drm_free_large(relocs);

	return ret;
}

int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	struct drm_i915_gem_object *obj_priv;
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_gem_relocation_entry *relocs;
	int ret, ret2, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains, reloc_index;
	int pin_tries;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
	/* Copy in the exec list from userland */
	exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
	object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
	if (exec_list == NULL || object_list == NULL) {
		DRM_ERROR("Failed to allocate exec or object list "
			  "for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		goto pre_mutex_err;
	}

	if (args->num_cliprects != 0) {
		cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
				       DRM_MEM_DRIVER);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		ret = copy_from_user(cliprects,
				     (struct drm_clip_rect __user *)
				     (uintptr_t) args->cliprects_ptr,
				     sizeof(*cliprects) * args->num_cliprects);
		if (ret != 0) {
			DRM_ERROR("copy %d cliprects failed: %d\n",
				  args->num_cliprects, ret);
			goto pre_mutex_err;
		}
	}

	ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
					    &relocs);
	if (ret != 0)
		goto pre_mutex_err;

	mutex_lock(&dev->struct_mutex);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Execbuf while wedged\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EIO;
		goto pre_mutex_err;
	}

	if (dev_priv->mm.suspended) {
		DRM_ERROR("Execbuf while VT-switched.\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec_list[i].handle, i);
			ret = -EBADF;
			goto err;
		}

		obj_priv = object_list[i]->driver_private;
		if (obj_priv->in_execbuffer) {
			DRM_ERROR("Object %p appears more than once in object list\n",
				  object_list[i]);
			ret = -EBADF;
			goto err;
		}
		obj_priv->in_execbuffer = true;
	}

	/* Pin and relocate */
	for (pin_tries = 0; ; pin_tries++) {
		ret = 0;
		reloc_index = 0;

		for (i = 0; i < args->buffer_count; i++) {
			object_list[i]->pending_read_domains = 0;
			object_list[i]->pending_write_domain = 0;
			ret = i915_gem_object_pin_and_relocate(object_list[i],
							       file_priv,
							       &exec_list[i],
							       &relocs[reloc_index]);
			if (ret)
				break;
			pinned = i + 1;
			reloc_index += exec_list[i].relocation_count;
		}
		/* success */
		if (ret == 0)
			break;

		/* error other than GTT full, or we've already tried again */
		if (ret != -ENOMEM || pin_tries >= 1) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to pin buffers %d\n", ret);
			goto err;
		}

		/* unpin all of our buffers */
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);
		pinned = 0;

		/* evict everyone we can from the aperture */
		ret = i915_gem_evict_everything(dev);
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
	batch_obj->pending_write_domain = 0;

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Zero the global flush/invalidate flags.  These
	 * will be modified as new domains are computed
	 * for each object
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		/* Compute new gpu domains and update invalidate/flush */
		i915_gem_object_set_to_gpu_domain(obj);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		if (dev->flush_domains)
			(void)i915_add_request(dev, dev->flush_domains);
	}

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		obj->write_domain = obj->pending_write_domain;
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}
#endif

	exec_offset = exec_list[args->buffer_count - 1].offset;

#if WATCH_EXEC
	i915_gem_dump_object(batch_obj,
			     args->batch_len,
			     __func__,
			     ~0);
#endif

	/* Exec the batchbuffer */
	ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains = i915_retire_commands(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on.  We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, flush_domains);
	BUG_ON(seqno == 0);
	i915_file_priv->mm.last_gem_seqno = seqno;
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
	}
#if WATCH_LRU
	i915_dump_lru(dev, __func__);
#endif

	i915_verify_inactive(dev, __FILE__, __LINE__);

err:
	for (i = 0; i < pinned; i++)
		i915_gem_object_unpin(object_list[i]);

	for (i = 0; i < args->buffer_count; i++) {
		if (object_list[i]) {
			obj_priv = object_list[i]->driver_private;
			obj_priv->in_execbuffer = false;
		}
		drm_gem_object_unreference(object_list[i]);
	}

	mutex_unlock(&dev->struct_mutex);

	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	/* Copy the updated relocations out regardless of current error
	 * state.  Failure to update the relocs would mean that the next
	 * time userland calls execbuf, it would do so with presumed offset
	 * state that didn't match the actual object state.
	 */
	ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
					   relocs);
	if (ret2 != 0) {
		DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);

		if (ret == 0)
			ret = ret2;
	}

pre_mutex_err:
	drm_free_large(object_list);
	drm_free_large(exec_list);
	drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
		 DRM_MEM_DRIVER);

	return ret;
}

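/*
 * Userspace sketch (illustrative; assumes libdrm's drmIoctl wrapper): the
 * caller packs its buffers into an array of struct drm_i915_gem_exec_object
 * entries, batch buffer last, each entry pointing at its relocation list,
 * and then issues:
 *
 *	struct drm_i915_gem_execbuffer execbuf;
 *
 *	memset(&execbuf, 0, sizeof(execbuf));
 *	execbuf.buffers_ptr = (uintptr_t) exec_objects;
 *	execbuf.buffer_count = count;
 *	execbuf.batch_start_offset = 0;
 *	execbuf.batch_len = batch_len;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
 *
 * exec_objects, count, batch_len and fd are the caller's own; on return,
 * each entry's offset and its relocations' presumed_offsets have been
 * updated as described above.
 */
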
3467int
3468i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3469{
3470 struct drm_device *dev = obj->dev;
3471 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3472 int ret;
3473
3474 i915_verify_inactive(dev, __FILE__, __LINE__);
3475 if (obj_priv->gtt_space == NULL) {
3476 ret = i915_gem_object_bind_to_gtt(obj, alignment);
3477 if (ret != 0) {
9bb2d6f9 3478 if (ret != -EBUSY && ret != -ERESTARTSYS)
0fce81e3 3479 DRM_ERROR("Failure to bind: %d\n", ret);
673a394b
EA
3480 return ret;
3481 }
22c344e9
CW
3482 }
3483 /*
3484 * Pre-965 chips need a fence register set up in order to
3485 * properly handle tiled surfaces.
3486 */
3487 if (!IS_I965G(dev) &&
3488 obj_priv->fence_reg == I915_FENCE_REG_NONE &&
3489 obj_priv->tiling_mode != I915_TILING_NONE) {
3490 ret = i915_gem_object_get_fence_reg(obj, true);
3491 if (ret != 0) {
3492 if (ret != -EBUSY && ret != -ERESTARTSYS)
3493 DRM_ERROR("Failure to install fence: %d\n",
3494 ret);
3495 return ret;
3496 }
673a394b
EA
3497 }
3498 obj_priv->pin_count++;
3499
3500 /* If the object is not active and not pending a flush,
3501 * remove it from the inactive list
3502 */
3503 if (obj_priv->pin_count == 1) {
3504 atomic_inc(&dev->pin_count);
3505 atomic_add(obj->size, &dev->pin_memory);
3506 if (!obj_priv->active &&
3507 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
3508 I915_GEM_DOMAIN_GTT)) == 0 &&
3509 !list_empty(&obj_priv->list))
3510 list_del_init(&obj_priv->list);
3511 }
3512 i915_verify_inactive(dev, __FILE__, __LINE__);
3513
3514 return 0;
3515}
3516
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}

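/* Userland entry point for pinning: look up the handle, refuse to pin an
 * object that some other file already holds pinned, and hand the resulting
 * GTT offset back through the ioctl arguments.
 */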
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}
	obj_priv = obj->driver_private;

	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	obj_priv->user_pin_count++;
	obj_priv->pin_filp = file_priv;
	if (obj_priv->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment);
		if (ret != 0) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

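/* Userland entry point for unpinning: only the file that pinned the object
 * may unpin it, and the object is actually unpinned once its user pin
 * count drops back to zero.
 */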
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = obj->driver_private;
	if (obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	obj_priv->user_pin_count--;
	if (obj_priv->user_pin_count == 0) {
		obj_priv->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

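/* Report whether an object is still in use by the GPU.  Requests are
 * retired first so the answer reflects the hardware's current position
 * rather than the last interrupt or retire-timer tick.
 */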
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	/* Update the active list for the hardware's current position.
	 * Otherwise this only updates on a delayed timer or when irqs are
	 * actually unmasked, and our working set ends up being larger than
	 * required.
	 */
	i915_gem_retire_requests(dev);

	obj_priv = obj->driver_private;
	/* Don't count being on the flushing list against the object being
	 * done.  Otherwise, a buffer left on the flushing list but not getting
	 * flushed (because nobody's flushing that domain) won't ever return
	 * unbusy and get reused by libdrm's bo cache.  The other expected
	 * consumer of this interface, OpenGL's occlusion queries, also specs
	 * that the objects get unbusy "eventually" without any interference.
	 */
	args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

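/* Allocate and initialize the driver-private state attached to every new
 * GEM object.  Freshly allocated objects start out in the CPU domain.
 */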
int i915_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;

	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
	if (obj_priv == NULL)
		return -ENOMEM;

	/*
	 * We've just allocated pages from the kernel, so they've just
	 * been written by the CPU with zeros.  They'll need to be
	 * clflushed before we use them with the GPU.
	 */
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	obj_priv->agp_type = AGP_USER_MEMORY;

	obj->driver_private = obj_priv;
	obj_priv->obj = obj;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj_priv->list);

	return 0;
}

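/* Tear an object down: drop any leftover pins, detach it from a phys
 * object if it has one, unbind it from the GTT, and free its mmap offset
 * and driver-private allocations.
 */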
void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	if (obj_priv->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_object_unbind(obj);

	i915_gem_free_mmap_offset(obj);

	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
	kfree(obj_priv->bit_17);
	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}

/** Unbinds all objects that are on the given buffer list. */
static int
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
{
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	while (!list_empty(head)) {
		obj_priv = list_first_entry(head,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		if (obj_priv->pin_count != 0) {
			DRM_ERROR("Pinned object in unbind list\n");
			mutex_unlock(&dev->struct_mutex);
			return -EINVAL;
		}

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
				  ret);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	return 0;
}

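/* Flush all outstanding GPU work and wait for the chip to go idle, then
 * move every buffer to the inactive list, evict it from the GTT and tear
 * down the ring.  Called when leaving the VT and on last close.
 */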
int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno, cur_seqno, last_seqno;
	int stuck, ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	/* Hack!  Don't let anybody do execbuf while we don't control the
	 * chip.  We need to replace this with a semaphore, or something.
	 */
	dev_priv->mm.suspended = 1;

	/* Cancel the retire work handler, and wait for it to finish if it
	 * is currently running.
	 */
	mutex_unlock(&dev->struct_mutex);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	mutex_lock(&dev->struct_mutex);

	i915_kernel_lost_context(dev);

	/* Flush the GPU along with all non-CPU write domains */
	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);

	if (seqno == 0) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	dev_priv->mm.waiting_gem_seqno = seqno;
	last_seqno = 0;
	stuck = 0;
	for (;;) {
		cur_seqno = i915_get_gem_seqno(dev);
		if (i915_seqno_passed(cur_seqno, seqno))
			break;
		if (last_seqno == cur_seqno) {
			if (stuck++ > 100) {
				DRM_ERROR("hardware wedged\n");
				dev_priv->mm.wedged = 1;
				DRM_WAKEUP(&dev_priv->irq_queue);
				break;
			}
		}
		msleep(10);
		last_seqno = cur_seqno;
	}
	dev_priv->mm.waiting_gem_seqno = 0;

	i915_gem_retire_requests(dev);

	spin_lock(&dev_priv->mm.active_list_lock);
	if (!dev_priv->mm.wedged) {
		/* Active and flushing should now be empty as we've
		 * waited for a sequence higher than any pending execbuffer
		 */
		WARN_ON(!list_empty(&dev_priv->mm.active_list));
		WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
		/* Request should now be empty as we've also waited
		 * for the last request in the list
		 */
		WARN_ON(!list_empty(&dev_priv->mm.request_list));
	}

	/* Empty the active and flushing lists to inactive.  If there's
	 * anything left at this point, it means that we're wedged and
	 * nothing good's going to happen by leaving them there.  So strip
	 * the GPU domains and just stuff them onto inactive.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}
	spin_unlock(&dev_priv->mm.active_list_lock);

	while (!list_empty(&dev_priv->mm.flushing_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	/* Move all inactive buffers out of the GTT. */
	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

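/* On chips that keep the hardware status page in graphics memory, allocate
 * and pin a GEM object for it and point HWS_PGA at its GTT offset.
 */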
static int
i915_gem_init_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	/* If we need a physical address for the status page, it's already
	 * initialized at driver load time.
	 */
	if (!I915_NEED_GFX_HWS(dev))
		return 0;

	obj = drm_gem_object_alloc(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	dev_priv->status_gfx_addr = obj_priv->gtt_offset;

	dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
	if (dev_priv->hw_status_page == NULL) {
		DRM_ERROR("Failed to map status page.\n");
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->hws_obj = obj;
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	I915_READ(HWS_PGA); /* posting read */
	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);

	return 0;
}

static void
i915_gem_cleanup_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	if (dev_priv->hws_obj == NULL)
		return;

	obj = dev_priv->hws_obj;
	obj_priv = obj->driver_private;

	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	dev_priv->hws_obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
	dev_priv->hw_status_page = NULL;

	/* Write high address into HWS_PGA when disabling. */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

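/* Allocate, pin and map the ring buffer object, then program the PRB0
 * registers and verify that the hardware really reset the ring head;
 * G45 parts are known to need a second attempt.
 */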
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	drm_i915_ring_buffer_t *ring = &dev_priv->ring;
	int ret;
	u32 head;

	ret = i915_gem_init_hws(dev);
	if (ret != 0)
		return ret;

	obj = drm_gem_object_alloc(dev, 128 * 1024);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		i915_gem_cleanup_hws(dev);
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		i915_gem_cleanup_hws(dev);
		return ret;
	}

	/* Set up the kernel mapping for the ring. */
	ring->Size = obj->size;
	ring->tail_mask = obj->size - 1;

	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.size = obj->size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		i915_gem_cleanup_hws(dev);
		return -EINVAL;
	}
	ring->ring_obj = obj;
	ring->virtual_start = ring->map.handle;

	/* Stop the ring if it's running. */
	I915_WRITE(PRB0_CTL, 0);
	I915_WRITE(PRB0_TAIL, 0);
	I915_WRITE(PRB0_HEAD, 0);

	/* Initialize the ring. */
	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("Ring head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		I915_WRITE(PRB0_HEAD, 0);

		DRM_ERROR("Ring head forced to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
	}

	I915_WRITE(PRB0_CTL,
		   ((obj->size - 4096) & RING_NR_PAGES) |
		   RING_NO_REPORT |
		   RING_VALID);

	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("Ring initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		return -EIO;
	}

	/* Update our cache of the ring state */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
	}

	return 0;
}

void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->ring.ring_obj == NULL)
		return;

	drm_core_ioremapfree(&dev_priv->ring.map, dev);

	i915_gem_object_unpin(dev_priv->ring.ring_obj);
	drm_gem_object_unreference(dev_priv->ring.ring_obj);
	dev_priv->ring.ring_obj = NULL;
	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));

	i915_gem_cleanup_hws(dev);
}

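/* Bring GEM back up when the master re-acquires the VT: reinitialize the
 * ring, check that all object lists drained while we were suspended, and
 * reinstall the interrupt handler.  A no-op under kernel modesetting.
 */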
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		dev_priv->mm.wedged = 0;
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	spin_lock(&dev_priv->mm.active_list_lock);
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	spin_unlock(&dev_priv->mm.active_list_lock);

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	mutex_unlock(&dev->struct_mutex);

	drm_irq_install(dev);

	return 0;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	ret = i915_gem_idle(dev);
	drm_irq_uninstall(dev);

	return ret;
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock_init(&dev_priv->mm.active_list_lock);
	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.request_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	dev_priv->mm.next_gem_seqno = 1;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	dev_priv->fence_reg_start = 3;

	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	i915_gem_detect_bit_6_swizzle(dev);
}

/*
 * Create a physically contiguous memory object for this object,
 * e.g. for cursor + overlay regs
 */
int i915_gem_init_phys_object(struct drm_device *dev,
			      int id, int size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object),
			      DRM_MEM_DRIVER);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr,
		      phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object),
		 DRM_MEM_DRIVER);
	return ret;
}

void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj)
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr,
		      phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}

void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;
	int i;
	int ret;
	int page_count;

	obj_priv = obj->driver_private;
	if (!obj_priv->phys_obj)
		return;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto out;

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
	}
	drm_clflush_pages(obj_priv->pages, page_count);
	drm_agp_chipset_flush(dev);
out:
	obj_priv->phys_obj->cur_obj = NULL;
	obj_priv->phys_obj = NULL;
}

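/* Bind an object to one of the physically contiguous phys objects,
 * creating the backing allocation on first use and copying the object's
 * current page contents into it.
 */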
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_gem_object *obj, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	obj_priv = obj->driver_private;

	if (obj_priv->phys_obj) {
		if (obj_priv->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id, obj->size);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->size);
			goto out;
		}
	}

	/* bind to the object */
	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj_priv->phys_obj->cur_obj = obj;

	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		DRM_ERROR("failed to get page list\n");
		goto out;
	}

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
	}

	return 0;
out:
	return ret;
}

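/* pwrite fast path for phys-object-backed buffers: copy the user data
 * straight into the contiguous allocation and flush the chipset write
 * buffers afterwards.
 */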
static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

	DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
	ret = copy_from_user(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	drm_agp_chipset_flush(dev);
	return 0;
}