/* drivers/gpu/drm/nouveau/nouveau_gem.c
 * (blame view at "drm/nouveau: drop drm_global_mutex before sleeping in submission path")
 */
/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

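/* Synchronous pushbuf submission is stubbed out here: the macro below
 * compiles the sync path away and always reports success. */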
#define nouveau_gem_pushbuf_sync(chan) 0

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
        return 0;
}

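/*
 * Called on final unref of a GEM object: drop any outstanding CPU-access
 * grab and pin references on the backing nouveau_bo, then release the
 * underlying TTM buffer object and the GEM object itself.
 */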
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
        struct nouveau_bo *nvbo = gem->driver_private;
        struct ttm_buffer_object *bo;

        if (!nvbo)
                return;
        bo = &nvbo->bo;
        nvbo->gem = NULL;

        if (unlikely(nvbo->cpu_filp))
                ttm_bo_synccpu_write_release(bo);

        if (unlikely(nvbo->pin_refcnt)) {
                nvbo->pin_refcnt = 1;
                nouveau_bo_unpin(nvbo);
        }

        ttm_bo_unref(&bo);

        drm_gem_object_release(gem);
        kfree(gem);
}

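/*
 * Allocate a nouveau_bo and wrap it in a GEM object sized to match the
 * buffer's placed size.  On failure the bo reference is dropped again.
 */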
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
                int size, int align, uint32_t flags, uint32_t tile_mode,
                uint32_t tile_flags, bool no_vm, bool mappable,
                struct nouveau_bo **pnvbo)
{
        struct nouveau_bo *nvbo;
        int ret;

        ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
                             tile_flags, no_vm, mappable, pnvbo);
        if (ret)
                return ret;
        nvbo = *pnvbo;

        nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
        if (!nvbo->gem) {
                nouveau_bo_ref(NULL, pnvbo);
                return -ENOMEM;
        }

        nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
        nvbo->gem->driver_private = nvbo;
        return 0;
}

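/*
 * Fill a drm_nouveau_gem_info reply from the bo's current state:
 * placement domain, size, GPU offset, mmap handle and tiling.
 */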
static int
nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);

        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                rep->domain = NOUVEAU_GEM_DOMAIN_GART;
        else
                rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

        rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
        rep->offset = nvbo->bo.offset;
        rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
        rep->tile_mode = nvbo->tile_mode;
        rep->tile_flags = nvbo->tile_flags;
        return 0;
}

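/*
 * Whitelist of tile_flags values accepted from userspace; anything else
 * is rejected with -EINVAL.  The values presumably encode hardware
 * memory/compression types, but only membership in the list is checked.
 */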
static bool
nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
{
        switch (tile_flags) {
        case 0x0000:
        case 0x1800:
        case 0x2800:
        case 0x4800:
        case 0x7000:
        case 0x7400:
        case 0x7a00:
        case 0xe000:
                break;
        default:
                NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
                return false;
        }

        return true;
}

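/*
 * GEM_NEW ioctl: translate the requested domains into TTM placement
 * flags, validate tiling, create the bo and hand a handle back to
 * userspace.  The creation-time references are dropped again so that,
 * on success, the userspace handle is what keeps the object alive.
 */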
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        struct nouveau_channel *chan = NULL;
        uint32_t flags = 0;
        int ret = 0;

        if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
                dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

        if (req->channel_hint) {
                NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
                                                     file_priv, chan);
        }

        if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;
        if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
                flags |= TTM_PL_FLAG_SYSTEM;

        if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
                return -EINVAL;

        ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
                              req->info.tile_mode, req->info.tile_flags, false,
                              (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
                              &nvbo);
        if (ret)
                return ret;

        ret = nouveau_gem_info(nvbo->gem, &req->info);
        if (ret)
                goto out;

        ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
out:
        drm_gem_object_handle_unreference_unlocked(nvbo->gem);

        if (ret)
                drm_gem_object_unreference_unlocked(nvbo->gem);
        return ret;
}

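/*
 * Compute a TTM placement for a bo from the domains userspace declared
 * valid/read/write.  Write domains win over read domains; if the bo is
 * already somewhere acceptable, prefer leaving it there.
 */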
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
                       uint32_t write_domains, uint32_t valid_domains)
{
        struct nouveau_bo *nvbo = gem->driver_private;
        struct ttm_buffer_object *bo = &nvbo->bo;
        uint32_t domains = valid_domains &
                (write_domains ? write_domains : read_domains);
        uint32_t pref_flags = 0, valid_flags = 0;

        if (!domains)
                return -EINVAL;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                valid_flags |= TTM_PL_FLAG_VRAM;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                valid_flags |= TTM_PL_FLAG_TT;

        if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
            bo->mem.mem_type == TTM_PL_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
                 bo->mem.mem_type == TTM_PL_TT)
                pref_flags |= TTM_PL_FLAG_TT;

        else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else
                pref_flags |= TTM_PL_FLAG_TT;

        nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

        return 0;
}

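/*
 * Per-submission bookkeeping for buffer validation.  Buffers are binned
 * by which domains they may live in so each list can be validated with
 * a consistent placement.
 */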
struct validate_op {
        struct list_head vram_list;
        struct list_head gart_list;
        struct list_head both_list;
};

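/*
 * Tear down a validation list: attach the submission fence as each bo's
 * new sync object, drop any kmap left over from reloc patching, then
 * unreserve and unreference the buffers.
 */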
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
        struct list_head *entry, *tmp;
        struct nouveau_bo *nvbo;

        list_for_each_safe(entry, tmp, list) {
                nvbo = list_entry(entry, struct nouveau_bo, entry);
                if (likely(fence)) {
                        struct nouveau_fence *prev_fence;

                        spin_lock(&nvbo->bo.lock);
                        prev_fence = nvbo->bo.sync_obj;
                        nvbo->bo.sync_obj = nouveau_fence_ref(fence);
                        spin_unlock(&nvbo->bo.lock);
                        nouveau_fence_unref((void *)&prev_fence);
                }

                if (unlikely(nvbo->validate_mapped)) {
                        ttm_bo_kunmap(&nvbo->kmap);
                        nvbo->validate_mapped = false;
                }

                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve(&nvbo->bo);
                drm_gem_object_unreference(nvbo->gem);
        }
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
        validate_fini_list(&op->vram_list, fence);
        validate_fini_list(&op->gart_list, fence);
        validate_fini_list(&op->both_list, fence);
}

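/*
 * Look up and reserve every buffer on the submission's list.  Reservation
 * uses a per-submission sequence number so deadlock against a concurrent
 * submission shows up as -EAGAIN, in which case everything is backed off
 * and retried.  While waiting for a userspace CPU-access grab to go away
 * we drop drm_global_mutex, so other ioctls can proceed in the meantime.
 */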
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
              struct drm_nouveau_gem_pushbuf_bo *pbbo,
              int nr_buffers, struct validate_op *op)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t sequence;
        int trycnt = 0;
        int ret, i;

        sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
        if (++trycnt > 100000) {
                NV_ERROR(dev, "%s failed and gave up.\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < nr_buffers; i++) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
                struct drm_gem_object *gem;
                struct nouveau_bo *nvbo;

                gem = drm_gem_object_lookup(dev, file_priv, b->handle);
                if (!gem) {
                        NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
                        validate_fini(op, NULL);
                        return -EINVAL;
                }
                nvbo = gem->driver_private;

                if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                        NV_ERROR(dev, "multiple instances of buffer %d on "
                                      "validation list\n", b->handle);
                        validate_fini(op, NULL);
                        return -EINVAL;
                }

                ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
                if (ret) {
                        validate_fini(op, NULL);
                        if (ret == -EAGAIN)
                                ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
                        drm_gem_object_unreference(gem);
                        if (ret) {
                                NV_ERROR(dev, "fail reserve\n");
                                return ret;
                        }
                        goto retry;
                }

                b->user_priv = (uint64_t)(unsigned long)nvbo;
                nvbo->reserved_by = file_priv;
                nvbo->pbbo_index = i;
                if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
                        list_add_tail(&nvbo->entry, &op->both_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                        list_add_tail(&nvbo->entry, &op->vram_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                        list_add_tail(&nvbo->entry, &op->gart_list);
                else {
                        NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
                                 b->valid_domains);
                        list_add_tail(&nvbo->entry, &op->both_list);
                        validate_fini(op, NULL);
                        return -EINVAL;
                }

                if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
                        validate_fini(op, NULL);

                        if (nvbo->cpu_filp == file_priv) {
                                NV_ERROR(dev, "bo %p mapped by process trying "
                                              "to validate it!\n", nvbo);
                                return -EINVAL;
                        }

                        mutex_unlock(&drm_global_mutex);
                        ret = ttm_bo_wait_cpu(&nvbo->bo, false);
                        mutex_lock(&drm_global_mutex);
                        if (ret) {
                                NV_ERROR(dev, "fail wait_cpu\n");
                                return ret;
                        }
                        goto retry;
                }
        }

        return 0;
}

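/*
 * Validate every buffer on one list into an acceptable placement, and
 * write back any "presumed" offset/domain that changed so userspace can
 * skip relocations next time.  Returns the number of buffers whose
 * presumed state was invalidated, or a negative error code.
 */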
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
              struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
        struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
                (void __force __user *)(uintptr_t)user_pbbo_ptr;
        struct drm_device *dev = chan->dev;
        struct nouveau_bo *nvbo;
        int ret, relocs = 0;

        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

                ret = nouveau_bo_sync_gpu(nvbo, chan);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail pre-validate sync\n");
                        return ret;
                }

                ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail set_domain\n");
                        return ret;
                }

                nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
                ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
                                      false, false, false);
                nvbo->channel = NULL;
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail ttm_validate\n");
                        return ret;
                }

                ret = nouveau_bo_sync_gpu(nvbo, chan);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail post-validate sync\n");
                        return ret;
                }

                if (nvbo->bo.offset == b->presumed.offset &&
                    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
                      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
                     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
                      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
                        continue;

                if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                        b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
                else
                        b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
                b->presumed.offset = nvbo->bo.offset;
                b->presumed.valid = 0;
                relocs++;

                if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
                                     &b->presumed, sizeof(b->presumed)))
                        return -EFAULT;
        }

        return relocs;
}

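/*
 * Reserve and validate the full buffer list for a pushbuf submission.
 * *apply_relocs is set to the total number of buffers whose presumed
 * state went stale, i.e. whether reloc patching is needed at all.
 */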
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
                             struct drm_file *file_priv,
                             struct drm_nouveau_gem_pushbuf_bo *pbbo,
                             uint64_t user_buffers, int nr_buffers,
                             struct validate_op *op, int *apply_relocs)
{
        struct drm_device *dev = chan->dev;
        int ret, relocs = 0;

        INIT_LIST_HEAD(&op->vram_list);
        INIT_LIST_HEAD(&op->gart_list);
        INIT_LIST_HEAD(&op->both_list);

        if (nr_buffers == 0)
                return 0;

        ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
        if (unlikely(ret)) {
                NV_ERROR(dev, "validate_init\n");
                return ret;
        }

        ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                NV_ERROR(dev, "validate vram_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                NV_ERROR(dev, "validate gart_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                NV_ERROR(dev, "validate both_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        *apply_relocs = relocs;
        return 0;
}

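/*
 * Copy a userspace array into a freshly kmalloc'd buffer.  Note that
 * nmemb * size is not checked for overflow here; the callers bound
 * nmemb against the NOUVEAU_GEM_MAX_* limits before calling in.
 */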
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
        void *mem;
        void __user *userptr = (void __force __user *)(uintptr_t)user;

        mem = kmalloc(nmemb * size, GFP_KERNEL);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
                kfree(mem);
                return ERR_PTR(-EFAULT);
        }

        return mem;
}

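/*
 * Patch relocations into buffers whose presumed offsets turned out to
 * be stale.  Each reloc names a target buffer (bo_index) whose address
 * is written, in LOW/HIGH/OR form, into a containing buffer
 * (reloc_bo_index) at reloc_bo_offset.
 */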
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
                                struct drm_nouveau_gem_pushbuf *req,
                                struct drm_nouveau_gem_pushbuf_bo *bo)
{
        struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
        int ret = 0;
        unsigned i;

        reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
        if (IS_ERR(reloc))
                return PTR_ERR(reloc);

        for (i = 0; i < req->nr_relocs; i++) {
                struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
                struct drm_nouveau_gem_pushbuf_bo *b;
                struct nouveau_bo *nvbo;
                uint32_t data;

                if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_ERROR(dev, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }

                b = &bo[r->bo_index];
                if (b->presumed.valid)
                        continue;

                if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_ERROR(dev, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }
                nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

                if (unlikely(r->reloc_bo_offset + 4 >
                             nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
                        NV_ERROR(dev, "reloc outside of bo\n");
                        ret = -EINVAL;
                        break;
                }

                if (!nvbo->kmap.virtual) {
                        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
                                          &nvbo->kmap);
                        if (ret) {
                                NV_ERROR(dev, "failed kmap for reloc\n");
                                break;
                        }
                        nvbo->validate_mapped = true;
                }

                if (r->flags & NOUVEAU_GEM_RELOC_LOW)
                        data = b->presumed.offset + r->data;
                else
                if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
                        data = (b->presumed.offset + r->data) >> 32;
                else
                        data = r->data;

                if (r->flags & NOUVEAU_GEM_RELOC_OR) {
                        if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
                                data |= r->tor;
                        else
                                data |= r->vor;
                }

                spin_lock(&nvbo->bo.lock);
                ret = ttm_bo_wait(&nvbo->bo, false, false, false);
                spin_unlock(&nvbo->bo.lock);
                if (ret) {
                        NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
                        break;
                }

                nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
        }

        kfree(reloc);
        return ret;
}

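/*
 * GEM_PUSHBUF ioctl: the main submission path.  After validating the
 * buffer list and applying relocs, pushes are submitted one of three
 * ways: via the IB ring on channels that have one, via what appears to
 * be a CALL command on >=nv25 chipsets, or by patching a JUMP into each
 * push buffer on older hardware.  A fence is emitted last so completion
 * can be tracked and the buffers released.
 */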
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_gem_pushbuf *req = data;
        struct drm_nouveau_gem_pushbuf_push *push;
        struct drm_nouveau_gem_pushbuf_bo *bo;
        struct nouveau_channel *chan;
        struct validate_op op;
        struct nouveau_fence *fence = NULL;
        int i, j, ret = 0, do_reloc = 0;

        NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);

        req->vram_available = dev_priv->fb_aper_free;
        req->gart_available = dev_priv->gart_info.aper_free;
        if (unlikely(req->nr_push == 0))
                goto out_next;

        if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
                NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
                         req->nr_push, NOUVEAU_GEM_MAX_PUSH);
                return -EINVAL;
        }

        if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
                NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
                         req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
                return -EINVAL;
        }

        if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
                NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
                         req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
                return -EINVAL;
        }

        push = u_memcpya(req->push, req->nr_push, sizeof(*push));
        if (IS_ERR(push))
                return PTR_ERR(push);

        bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
        if (IS_ERR(bo)) {
                kfree(push);
                return PTR_ERR(bo);
        }

        mutex_lock(&dev->struct_mutex);

        /* Mark push buffers as being used on PFIFO, the validation code
         * will then make sure that if the pushbuf bo moves, that they
         * happen on the kernel channel, which will in turn cause a sync
         * to happen before we try and submit the push buffer.
         */
        for (i = 0; i < req->nr_push; i++) {
                if (push[i].bo_index >= req->nr_buffers) {
                        NV_ERROR(dev, "push %d buffer not in list\n", i);
                        ret = -EINVAL;
                        goto out;
                }

                bo[push[i].bo_index].read_domains |= (1 << 31);
        }

        /* Validate buffer list */
        ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
                                           req->nr_buffers, &op, &do_reloc);
        if (ret) {
                NV_ERROR(dev, "validate: %d\n", ret);
                goto out;
        }

        /* Apply any relocations that are required */
        if (do_reloc) {
                ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
                if (ret) {
                        NV_ERROR(dev, "reloc apply: %d\n", ret);
                        goto out;
                }
        }

        if (chan->dma.ib_max) {
                ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
                if (ret) {
                        NV_INFO(dev, "nv50cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        nv50_dma_push(chan, nvbo, push[i].offset,
                                      push[i].length);
                }
        } else
        if (dev_priv->chipset >= 0x25) {
                ret = RING_SPACE(chan, req->nr_push * 2);
                if (ret) {
                        NV_ERROR(dev, "cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

                        OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
                                        push[i].offset) | 2);
                        OUT_RING(chan, 0);
                }
        } else {
                ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
                if (ret) {
                        NV_ERROR(dev, "jmp_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
                        uint32_t cmd;

                        cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
                        cmd |= 0x20000000;
                        if (unlikely(cmd != req->suffix0)) {
                                if (!nvbo->kmap.virtual) {
                                        ret = ttm_bo_kmap(&nvbo->bo, 0,
                                                          nvbo->bo.mem.num_pages,
                                                          &nvbo->kmap);
                                        if (ret) {
                                                WIND_RING(chan);
                                                goto out;
                                        }
                                        nvbo->validate_mapped = true;
                                }

                                nouveau_bo_wr32(nvbo, (push[i].offset +
                                                push[i].length - 8) / 4, cmd);
                        }

                        OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
                                        push[i].offset) | 0x20000000);
                        OUT_RING(chan, 0);
                        for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
                                OUT_RING(chan, 0);
                }
        }

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret) {
                NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
                WIND_RING(chan);
                goto out;
        }

out:
        validate_fini(&op, fence);
        nouveau_fence_unref((void **)&fence);
        mutex_unlock(&dev->struct_mutex);
        kfree(bo);
        kfree(push);

out_next:
        if (chan->dma.ib_max) {
                req->suffix0 = 0x00000000;
                req->suffix1 = 0x00000000;
        } else
        if (dev_priv->chipset >= 0x25) {
                req->suffix0 = 0x00020000;
                req->suffix1 = 0x00000000;
        } else {
                req->suffix0 = 0x20000000 |
                               (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
                req->suffix1 = 0x00000000;
        }

        return ret;
}

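/* Translate GEM domain bits into the equivalent TTM placement flags. */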
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
        uint32_t flags = 0;

        if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;

        return flags;
}

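/*
 * GEM_CPU_PREP ioctl: wait for (or, with NOWAIT/NOBLOCK, poll) GPU
 * activity on a bo before CPU access, taking a synccpu write grab in
 * the blocking case so later validation knows the CPU owns the buffer.
 */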
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_prep *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;
        bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
        int ret = -EINVAL;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return ret;
        nvbo = nouveau_gem_object(gem);

        if (nvbo->cpu_filp) {
                if (nvbo->cpu_filp == file_priv)
                        goto out;

                ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
                if (ret)
                        goto out;
        }

        if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
                spin_lock(&nvbo->bo.lock);
                ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
                spin_unlock(&nvbo->bo.lock);
        } else {
                ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
                if (ret == 0)
                        nvbo->cpu_filp = file_priv;
        }

out:
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}

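/*
 * GEM_CPU_FINI ioctl: release the synccpu write grab taken by CPU_PREP,
 * but only for the file that actually owns it.
 */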
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_prep *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;
        int ret = -EINVAL;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return ret;
        nvbo = nouveau_gem_object(gem);

        if (nvbo->cpu_filp != file_priv)
                goto out;
        nvbo->cpu_filp = NULL;

        ttm_bo_synccpu_write_release(&nvbo->bo);
        ret = 0;

out:
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}

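/* GEM_INFO ioctl: report a bo's current placement, size and tiling. */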
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_nouveau_gem_info *req = data;
        struct drm_gem_object *gem;
        int ret;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return -EINVAL;

        ret = nouveau_gem_info(gem, req);
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}