/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#define nouveau_gem_pushbuf_sync(chan) 0

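/*
 * GEM object lifecycle hooks.  There is nothing to set up at object
 * creation time; teardown detaches the GEM object from its nouveau_bo,
 * drops any CPU access lock or pin still held, and releases the
 * backing TTM buffer object.
 */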
int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (!nvbo)
		return;
	nvbo->gem = NULL;

	if (unlikely(nvbo->cpu_filp))
		ttm_bo_synccpu_write_release(bo);

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}

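/*
 * Allocate a new nouveau_bo through TTM and wrap it in a GEM object so
 * that userspace can refer to it by handle.  The GEM object's shmem
 * file doubles as the buffer's persistent swap storage.
 */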
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
		int size, int align, uint32_t flags, uint32_t tile_mode,
		uint32_t tile_flags, bool no_vm, bool mappable,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
			     tile_flags, no_vm, mappable, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

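/*
 * Fill in the info block returned to userspace: current placement
 * (GART or VRAM), size, GPU offset, mmap handle and tiling state.
 */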
static int
nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->offset = nvbo->bo.offset;
	rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

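/*
 * Only a small whitelist of tile_flags values is understood by the
 * kernel; reject anything else before it can reach the hardware.
 */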
static bool
nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
{
	switch (tile_flags) {
	case 0x0000:
	case 0x1800:
	case 0x2800:
	case 0x4800:
	case 0x7000:
	case 0x7400:
	case 0x7a00:
	case 0xe000:
		break;
	default:
		NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
		return false;
	}

	return true;
}

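/*
 * DRM_IOCTL_NOUVEAU_GEM_NEW: allocate a buffer object on behalf of
 * userspace and return a handle, placement and tiling info for it.
 */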
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	struct nouveau_channel *chan = NULL;
	uint32_t flags = 0;
	int ret = 0;

	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

	if (req->channel_hint) {
		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
						     file_priv, chan);
	}

	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
		return -EINVAL;

	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
			      req->info.tile_mode, req->info.tile_flags, false,
			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
			      &nvbo);
	if (ret)
		return ret;

	ret = nouveau_gem_info(nvbo->gem, &req->info);
	if (ret)
		goto out;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
out:
	drm_gem_object_handle_unreference_unlocked(nvbo->gem);

	if (ret)
		drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}

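/*
 * Convert the read/write/valid domain masks supplied by userspace into
 * a TTM placement, preferring whichever allowed domain the buffer
 * already resides in to avoid needless migration.
 */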
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

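/*
 * Buffers queued for validation, kept on separate lists according to
 * the domains they are allowed to be placed in.
 */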
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};

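/*
 * Unwind a validation list: attach the new fence (if any) to every
 * buffer, drop kmaps taken for relocations, then unreserve and
 * unreference each object.
 */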
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);
		if (likely(fence)) {
			struct nouveau_fence *prev_fence;

			spin_lock(&nvbo->bo.lock);
			prev_fence = nvbo->bo.sync_obj;
			nvbo->bo.sync_obj = nouveau_fence_ref(fence);
			spin_unlock(&nvbo->bo.lock);
			nouveau_fence_unref((void *)&prev_fence);
		}

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference(nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}

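/*
 * Look up and reserve every buffer in the submission.  Reservation uses
 * a per-submission sequence number, so losing a race against another
 * submission shows up as -EAGAIN; in that case everything reserved so
 * far is backed off and the whole list is retried from scratch.
 */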
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;

	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(dev, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
		if (ret) {
			validate_fini(op, NULL);
			if (ret == -EAGAIN)
				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
			drm_gem_object_unreference(gem);
			if (ret) {
				NV_ERROR(dev, "fail reserve\n");
				return ret;
			}
			goto retry;
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
			validate_fini(op, NULL);

			if (nvbo->cpu_filp == file_priv) {
				NV_ERROR(dev, "bo %p mapped by process trying "
					      "to validate it!\n", nvbo);
				return -EINVAL;
			}

			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
			if (ret) {
				NV_ERROR(dev, "fail wait_cpu\n");
				return ret;
			}
			goto retry;
		}
	}

	return 0;
}

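/*
 * Validate every buffer on one list into an acceptable placement, and
 * copy any presumed offset/domain that turned out to be wrong back to
 * userspace so later submissions can be corrected.  Returns the number
 * of buffers whose presumed state was invalidated.
 */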
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_bo_sync_gpu(nvbo, chan);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail set_domain\n");
			return ret;
		}

		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
				      false, false, false);
		nvbo->channel = NULL;
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_bo_sync_gpu(nvbo, chan);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail post-validate sync\n");
			return ret;
		}

		if (nvbo->bo.offset == b->presumed.offset &&
		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
			continue;

		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
			b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
		else
			b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
		b->presumed.offset = nvbo->bo.offset;
		b->presumed.valid = 0;
		relocs++;

		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
				     &b->presumed, sizeof(b->presumed)))
			return -EFAULT;
	}

	return relocs;
}

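/*
 * Reserve and validate all buffers referenced by a pushbuf submission.
 * On success *apply_relocs holds the number of buffers whose presumed
 * placement was stale, i.e. whether relocations need applying.
 */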
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct drm_device *dev = chan->dev;
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		NV_ERROR(dev, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		NV_ERROR(dev, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		NV_ERROR(dev, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		NV_ERROR(dev, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

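/*
 * Copy a userspace array into a freshly allocated kernel buffer.  The
 * element counts passed in are bounded by the NOUVEAU_GEM_MAX_* checks
 * in the callers before this is reached.
 */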
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

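/*
 * Patch relocations into the buffers that contain them, using the
 * validated GPU offsets of the referenced buffers.  Entries whose
 * presumed state is still valid are skipped.
 */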
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index > req->nr_buffers)) {
			NV_ERROR(dev, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
			NV_ERROR(dev, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(dev, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(dev, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.lock);
		if (ret) {
			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}

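/*
 * DRM_IOCTL_NOUVEAU_GEM_PUSHBUF: validate the buffer list, apply any
 * relocations, then submit each push segment to the channel using
 * whichever method the chipset supports: IB entries, CALL commands
 * (NV20 and up) or JUMP commands on older hardware.
 */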
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);

	req->vram_available = dev_priv->fb_aper_free;
	req->gart_available = dev_priv->gart_info.aper_free;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return -EINVAL;
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return -EINVAL;
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return -EINVAL;
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return PTR_ERR(push);

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		return PTR_ERR(bo);
	}

	mutex_lock(&dev->struct_mutex);

	/* Mark push buffers as being used on PFIFO, the validation code
	 * will then make sure that if the pushbuf bo moves, that they
	 * happen on the kernel channel, which will in turn cause a sync
	 * to happen before we try and submit the push buffer.
	 */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(dev, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out;
		}

		bo[push[i].bo_index].read_domains |= (1 << 31);
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		NV_ERROR(dev, "validate: %d\n", ret);
		goto out;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
		if (ret) {
			NV_ERROR(dev, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
		if (ret) {
			NV_INFO(dev, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (dev_priv->card_type >= NV_20) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(dev, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(dev, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
			uint32_t cmd;

			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 0x20000000);
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref((void **)&fence);
	mutex_unlock(&dev->struct_mutex);
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (dev_priv->card_type >= NV_20) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return ret;
}

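/* Translate NOUVEAU_GEM_DOMAIN_* flags into TTM placement flags. */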
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

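/*
 * DRM_IOCTL_NOUVEAU_GEM_CPU_PREP: synchronise a buffer for CPU access.
 * With NOBLOCK the buffer is only waited on (or polled, with NOWAIT);
 * otherwise the TTM synccpu write lock is grabbed and the owner
 * recorded in cpu_filp so CPU_FINI can release it later.
 */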
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	if (nvbo->cpu_filp) {
		if (nvbo->cpu_filp == file_priv)
			goto out;

		ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
		if (ret)
			goto out;
	}

	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
		spin_lock(&nvbo->bo.lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
		spin_unlock(&nvbo->bo.lock);
	} else {
		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
		if (ret == 0)
			nvbo->cpu_filp = file_priv;
	}

out:
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

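/*
 * DRM_IOCTL_NOUVEAU_GEM_CPU_FINI: release the synccpu write lock taken
 * by CPU_PREP, provided the caller is the client that holds it.
 */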
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	if (nvbo->cpu_filp != file_priv)
		goto out;
	nvbo->cpu_filp = NULL;

	ttm_bo_synccpu_write_release(&nvbo->bo);
	ret = 0;

out:
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

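/*
 * DRM_IOCTL_NOUVEAU_GEM_INFO: report placement, size, offset and tiling
 * state for a buffer handle.
 */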
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}