/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

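/*
 * Release the drm_mm node backing the buffer's current placement, taking
 * the global LRU lock around drm_mm_put_block(), and clear mem.mm_node.
 */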
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (old_mem->mm_node) {
		spin_lock(&bo->glob->lru_lock);
		drm_mm_put_block(old_mem->mm_node);
		spin_unlock(&bo->glob->lru_lock);
	}
	old_mem->mm_node = NULL;
}

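/*
 * Move a buffer by rebinding its TTM: unbind from the old placement (and
 * drop the old node) if it isn't system memory, fix up the caching state,
 * then bind the TTM to the new placement unless that is system memory.
 * On success the buffer's mem is updated and new_mem's node is consumed.
 */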
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait_reserve,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

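/*
 * Map the aperture range backing @mem into kernel address space. If the
 * memory type does not need an ioremap, return an offset into the
 * existing kernel mapping; otherwise ioremap the range write-combined or
 * uncached according to the placement flags.
 */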
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
	if (ret || bus_size == 0)
		return ret;

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
	else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(bus_base + bus_offset, bus_size);
		else
			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
		if (!addr)
			return -ENOMEM;
	}
	*virtual = addr;
	return 0;
}

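/*
 * Undo ttm_mem_reg_ioremap(): iounmap only if the mapping was actually
 * created by ioremap (i.e. the memory type needed it).
 */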
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		iounmap(virtual);
}

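/*
 * Copy one page between two iomapped regions, 32 bits at a time.
 */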
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

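/*
 * Copy one page from an iomapped source into the TTM page at index @page,
 * mapping the destination page with the requested protection (atomic kmap
 * on x86, vmap/kmap elsewhere).
 */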
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

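/*
 * Copy one TTM page at index @page out to an iomapped destination, mapping
 * the source page with the requested protection (atomic kmap on x86,
 * vmap/kmap elsewhere).
 */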
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

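/*
 * Fallback move path: copy the buffer page by page with the CPU. Both the
 * old and the new placement are ioremapped if needed; the copy direction
 * is reversed when the ranges overlap within the same memory type. When
 * the destination memory type is FIXED, the now-unused TTM is unbound and
 * destroyed afterwards.
 */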
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	spin_lock_init(&fbo->lock);
	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	fbo->vm_node = NULL;

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	if (fbo->mem.mm_node)
		fbo->mem.mm_node->private = (void *)fbo;
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}

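/*
 * Translate placement caching flags into a page protection value for a
 * CPU mapping: write-combined where TTM_PL_FLAG_WC is set, otherwise
 * uncached, with architecture-specific handling for x86, powerpc, ia64
 * and sparc.
 */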
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

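/*
 * Set up the iomap part of a kmap object: reuse the memory type's
 * premapped kernel address when available, otherwise ioremap the bus
 * range write-combined or uncached according to the placement.
 */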
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long bus_base,
			  unsigned long bus_offset,
			  unsigned long bus_size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bus_base + bus_offset,
						  bus_size);
		else
			map->virtual = ioremap_nocache(bus_base + bus_offset,
						       bus_size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

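/*
 * Map TTM-backed (system) pages of a buffer. A single cached page is
 * kmapped directly; anything else is populated and vmapped with the
 * protection derived from the placement flags.
 */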
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	struct page *d;
	int i;

	BUG_ON(!ttm);
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping;
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

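/*
 * Map part of a buffer object into kernel address space, dispatching to
 * ttm_bo_kmap_ttm() for system memory and ttm_bo_ioremap() for memory
 * with a PCI offset. Callers normally hold a reservation and balance the
 * call with ttm_bo_kunmap(). A minimal usage sketch (illustrative only):
 *
 *	struct ttm_bo_kmap_obj map;
 *	int ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (!ret) {
 *		bool is_iomem;
 *		void *ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		// ... access ptr ...
 *		ttm_bo_kunmap(&map);
 *	}
 */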
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	int ret;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	if (ret)
		return ret;
	if (bus_size == 0) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		bus_offset += start_page << PAGE_SHIFT;
		bus_size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

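/*
 * Tear down a mapping created by ttm_bo_kmap(), using the unmap primitive
 * that matches how it was established (iounmap, vunmap, kunmap, or nothing
 * for premapped ranges).
 */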
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

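/*
 * Look up the page frame number and page protection to use when inserting
 * the page at @dst_offset into a CPU page table: an aperture pfn when the
 * buffer has a PCI offset, otherwise the pfn of the backing TTM page.
 */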
int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
		    unsigned long dst_offset,
		    unsigned long *pfn, pgprot_t *prot)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;
	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
				&bus_size);
	if (ret)
		return -EINVAL;
	if (bus_size != 0)
		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
	else
		if (!bo->ttm)
			return -EINVAL;
		else
			*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
							   dst_offset >>
							   PAGE_SHIFT));
	*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);

	return 0;
}

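/*
 * Finish an accelerated (driver-scheduled) move: attach the new fence to
 * the buffer, then either wait for idle and free the old node (eviction),
 * or hand the old placement to a ghost object created by
 * ttm_buffer_object_transfer() so it is released once the copy completes.
 */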
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait_reserve,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bo->lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		ttm_bo_free_old_node(bo);
		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);