/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"

/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user-defined handle (CARD32). The HW
   looks up graphics objects in a hash table in instance RAM.

   An entry in the hash table consists of 2 CARD32s: the first contains the
   handle, the second a bitfield containing the address of the object in
   instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0 instance_addr >> 4
   17:16 engine (here uses 1 = graphics)
   28:24 channel id (here uses 0)
   31    valid (use 1)

   NV40:

   15: 0 instance_addr >> 4   (maybe 19-0)
   21:20 engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id and
   is computed as follows:
*/
static uint32_t
nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t hash = 0;
	int i;

	NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle);

	for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
		hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
		handle >>= dev_priv->ramht_bits;
	}

	if (dev_priv->card_type < NV_50)
		hash ^= channel << (dev_priv->ramht_bits - 4);
	hash <<= 3;

	NV_DEBUG(dev, "hash=0x%08x\n", hash);
	return hash;
}
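
/* A worked example with hypothetical values: handle 0xbeef0201 on channel 1
 * with ramht_bits = 9.  Folding the handle 9 bits at a time XORs
 * 0x001 ^ 0x181 ^ 0x1bb ^ 0x017 = 0x02c; XORing in the channel id
 * (1 << (9 - 4) = 0x20) gives 0x00c, and the final <<= 3 converts the
 * 8-byte entry index into byte offset 0x60 within RAMHT.
 */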

static int
nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
			  uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);

	if (dev_priv->card_type < NV_40)
		return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
	return (ctx != 0);
}

static int
nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_channel *chan = ref->channel;
	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
	uint32_t ctx, co, ho;

	if (!ramht) {
		NV_ERROR(dev, "No hash table!\n");
		return -EINVAL;
	}

	if (dev_priv->card_type < NV_40) {
		ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
		      (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		      (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
	} else
	if (dev_priv->card_type < NV_50) {
		ctx = (ref->instance >> 4) |
		      (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		      (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
	} else {
		if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
			ctx = (ref->instance << 10) | 2;
		} else {
			ctx = (ref->instance >> 4) |
			      ((ref->gpuobj->engine <<
				NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
		}
	}

	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
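	/* Linear probe: step through the 8-byte entries starting at the
	 * hash slot, wrapping at ramht_size, until a free slot is found
	 * or we arrive back at the starting offset. */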
	do {
		if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
			NV_DEBUG(dev,
				 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
				 chan->id, co, ref->handle, ctx);
			nv_wo32(dev, ramht, (co + 0)/4, ref->handle);
			nv_wo32(dev, ramht, (co + 4)/4, ctx);

			list_add_tail(&ref->list, &chan->ramht_refs);
			instmem->flush(dev);
			return 0;
		}
		NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
			 chan->id, co, nv_ro32(dev, ramht, co/4));

		co += 8;
		if (co >= dev_priv->ramht_size)
			co = 0;
	} while (co != ho);

	NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
	return -ENOMEM;
}

static void
nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_channel *chan = ref->channel;
	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
	uint32_t co, ho;

	if (!ramht) {
		NV_ERROR(dev, "No hash table!\n");
		return;
	}

	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
	do {
		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
		    (ref->handle == nv_ro32(dev, ramht, (co/4)))) {
			NV_DEBUG(dev,
				 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
				 chan->id, co, ref->handle,
				 nv_ro32(dev, ramht, (co + 4)/4));
			nv_wo32(dev, ramht, (co + 0)/4, 0x00000000);
			nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);

			list_del(&ref->list);
			instmem->flush(dev);
			return;
		}

		co += 8;
		if (co >= dev_priv->ramht_size)
			co = 0;
	} while (co != ho);
	list_del(&ref->list);

	NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
		 chan->id, ref->handle);
}

int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm *pramin = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->flags = flags;
	gpuobj->im_channel = chan;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	/* Choose between the global instmem heap and the per-channel
	 * private instmem heap.  On <NV50, allow requests for private
	 * instmem to be satisfied from the global heap if no per-channel
	 * area is available.
	 */
	if (chan) {
		if (chan->ramin_heap.ml_entry.next) {
			NV_DEBUG(dev, "private heap\n");
			pramin = &chan->ramin_heap;
		} else
		if (dev_priv->card_type < NV_50) {
			NV_DEBUG(dev, "global heap fallback\n");
			pramin = &dev_priv->ramin_heap;
		}
	} else {
		NV_DEBUG(dev, "global heap\n");
		pramin = &dev_priv->ramin_heap;
	}

	if (!pramin) {
		NV_ERROR(dev, "No PRAMIN heap!\n");
		nouveau_gpuobj_del(dev, &gpuobj);
		return -EINVAL;
	}

	if (!chan) {
		ret = engine->instmem.populate(dev, gpuobj, &size);
		if (ret) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return ret;
		}
	}

	/* Allocate a chunk of the PRAMIN aperture */
	gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0);
	if (gpuobj->im_pramin)
		gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align);

	if (!gpuobj->im_pramin) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return -ENOMEM;
	}

	if (!chan) {
		ret = engine->instmem.bind(dev, gpuobj);
		if (ret) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return ret;
		}
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		int i;

		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(dev, gpuobj, i/4, 0);
		engine->instmem.flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}

int
nouveau_gpuobj_early_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);

	return 0;
}

int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	NV_DEBUG(dev, "\n");

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_gpuobj_new_fake(dev,
			dev_priv->ramht_offset, ~0, dev_priv->ramht_size,
			NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
			&dev_priv->ramht, NULL);
		if (ret)
			return ret;
	}

	return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	nouveau_gpuobj_del(dev, &dev_priv->ramht);
}

void
nouveau_gpuobj_late_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	struct list_head *entry, *tmp;

	NV_DEBUG(dev, "\n");

	list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
		gpuobj = list_entry(entry, struct nouveau_gpuobj, list);

		NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
			 gpuobj, gpuobj->refcount);
		gpuobj->refcount = 0;
		nouveau_gpuobj_del(dev, &gpuobj);
	}
}

int
nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);

	if (!dev_priv || !pgpuobj || !(*pgpuobj))
		return -EINVAL;
	gpuobj = *pgpuobj;

	if (gpuobj->refcount != 0) {
		NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
		return -EINVAL;
	}

	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(dev, gpuobj, i/4, 0);
		engine->instmem.flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
		engine->instmem.clear(dev, gpuobj);

	if (gpuobj->im_pramin) {
		if (gpuobj->flags & NVOBJ_FLAG_FAKE)
			kfree(gpuobj->im_pramin);
		else
			drm_mm_put_block(gpuobj->im_pramin);
	}

	list_del(&gpuobj->list);

	*pgpuobj = NULL;
	kfree(gpuobj);
	return 0;
}

static int
nouveau_gpuobj_instance_get(struct drm_device *dev,
			    struct nouveau_channel *chan,
			    struct nouveau_gpuobj *gpuobj, uint32_t *inst)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *cpramin;

	/* <NV50 use PRAMIN address everywhere */
	if (dev_priv->card_type < NV_50) {
		*inst = gpuobj->im_pramin->start;
		return 0;
	}

	if (chan && gpuobj->im_channel != chan) {
		NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n",
			 gpuobj->im_channel->id, chan->id);
		return -EINVAL;
	}

	/* NV50 channel-local instance */
	if (chan) {
		cpramin = chan->ramin->gpuobj;
		*inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
		return 0;
	}

	/* NV50 global (VRAM) instance */
	if (!gpuobj->im_channel) {
		/* ...from global heap */
		if (!gpuobj->im_backing) {
			NV_ERROR(dev, "AII, no VRAM backing gpuobj\n");
			return -EINVAL;
		}
		*inst = gpuobj->im_backing_start;
		return 0;
	} else {
		/* ...from local heap */
		cpramin = gpuobj->im_channel->ramin->gpuobj;
		*inst = cpramin->im_backing_start +
			(gpuobj->im_pramin->start - cpramin->im_pramin->start);
		return 0;
	}

	return -EINVAL;
}

int
nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
		       uint32_t handle, struct nouveau_gpuobj *gpuobj,
		       struct nouveau_gpuobj_ref **ref_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_ref *ref;
	uint32_t instance;
	int ret;

	NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
		 chan ? chan->id : -1, handle, gpuobj);

	if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
		return -EINVAL;

	if (!chan && !ref_ret)
		return -EINVAL;

	if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
		/* sw object */
		instance = 0x40;
	} else {
		ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
		if (ret)
			return ret;
	}

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;
	INIT_LIST_HEAD(&ref->list);
	ref->gpuobj = gpuobj;
	ref->channel = chan;
	ref->instance = instance;

	if (!ref_ret) {
		ref->handle = handle;

		ret = nouveau_ramht_insert(dev, ref);
		if (ret) {
			kfree(ref);
			return ret;
		}
	} else {
		ref->handle = ~0;
		*ref_ret = ref;
	}

	ref->gpuobj->refcount++;
	return 0;
}

int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
{
	struct nouveau_gpuobj_ref *ref;

	NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);

	if (!dev || !pref || *pref == NULL)
		return -EINVAL;
	ref = *pref;

	if (ref->handle != ~0)
		nouveau_ramht_remove(dev, ref);

	if (ref->gpuobj) {
		ref->gpuobj->refcount--;

		if (ref->gpuobj->refcount == 0) {
			if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
				nouveau_gpuobj_del(dev, &ref->gpuobj);
		}
	}

	*pref = NULL;
	kfree(ref);
	return 0;
}

int
nouveau_gpuobj_new_ref(struct drm_device *dev,
		       struct nouveau_channel *oc, struct nouveau_channel *rc,
		       uint32_t handle, uint32_t size, int align,
		       uint32_t flags, struct nouveau_gpuobj_ref **ref)
{
	struct nouveau_gpuobj *gpuobj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
	if (ret) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
			struct nouveau_gpuobj_ref **ref_ret)
{
	struct nouveau_gpuobj_ref *ref;
	struct list_head *entry, *tmp;

	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);

		if (ref->handle == handle) {
			if (ref_ret)
				*ref_ret = ref;
			return 0;
		}
	}

	return -EINVAL;
}

int
nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
			uint32_t b_offset, uint32_t size,
			uint32_t flags, struct nouveau_gpuobj **pgpuobj,
			struct nouveau_gpuobj_ref **pref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
		 p_offset, b_offset, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->im_channel = NULL;
	gpuobj->flags = flags | NVOBJ_FLAG_FAKE;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	if (p_offset != ~0) {
		gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node),
					    GFP_KERNEL);
		if (!gpuobj->im_pramin) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return -ENOMEM;
		}
		gpuobj->im_pramin->start = p_offset;
		gpuobj->im_pramin->size = size;
	}

	if (b_offset != ~0) {
		gpuobj->im_backing = (struct nouveau_bo *)-1;
		gpuobj->im_backing_start = b_offset;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(dev, gpuobj, i/4, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	if (pref) {
		i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref);
		if (i) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return i;
		}
	}

	if (pgpuobj)
		*pgpuobj = gpuobj;
	return 0;
}


static uint32_t
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*XXX: dodgy hack for now */
	if (dev_priv->card_type >= NV_50)
		return 24;
	if (dev_priv->card_type >= NV_40)
		return 32;
	return 16;
}

/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
   11:0  class (seems like I can always use 0 here)
   12    page table present?
   13    page entry linear?
   15:14 access: 0 rw, 1 ro, 2 wo
   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
   31:20 dma adjust (bits 0-11 of the address)
   entry[1]
   dma limit (size of transfer)
   entry[X]
   1     0 readonly, 1 readwrite
   31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
   page table terminator, same value as the first pte, as nvidia does;
   rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards;
   the rivatv project has some info on this.

   The method below creates a DMA object in instance RAM and returns a handle
   to it that can be used to set up context objects.
*/
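
/* A worked illustration of the pre-NV50 layout above, with hypothetical
 * values: class 0x003d, target PCI (2), access rw (0), offset 0x12345678,
 * size 0x1000.  Then adjust = 0x678, frame = 0x12345000, and, with the
 * read/write PTE flag (bit 1) set, the code below would write:
 *
 *   entry[0] = (1<<12) | (1<<13) | (0x678<<20) | (0<<14) | (2<<16) | 0x3d
 *            = 0x6782303d
 *   entry[1] = 0x00000fff               (size - 1)
 *   entry[2] = entry[3] = 0x12345002    (frame | pte_flags)
 */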
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
		       uint64_t offset, uint64_t size, int access,
		       int target, struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
		 chan->id, class, offset, size);
	NV_DEBUG(dev, "access=%d target=%d\n", access, target);

	switch (target) {
	case NV_DMA_TARGET_AGP:
		offset += dev_priv->gart_info.aper_base;
		break;
	default:
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type < NV_50) {
		uint32_t frame, adjust, pte_flags = 0;

		if (access != NV_DMA_ACCESS_RO)
			pte_flags |= (1<<1);
		adjust = offset & 0x00000fff;
		frame = offset & ~0x00000fff;

		nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
					  (adjust << 20) |
					  (access << 14) |
					  (target << 16) |
					  class));
		nv_wo32(dev, *gpuobj, 1, size - 1);
		nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
		nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
	} else {
		uint64_t limit = offset + size - 1;
		uint32_t flags0, flags5;

		if (target == NV_DMA_TARGET_VIDMEM) {
			flags0 = 0x00190000;
			flags5 = 0x00010000;
		} else {
			flags0 = 0x7fc00000;
			flags5 = 0x00080000;
		}

		nv_wo32(dev, *gpuobj, 0, flags0 | class);
		nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
		nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
		nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
					  (upper_32_bits(offset) & 0xff));
		nv_wo32(dev, *gpuobj, 5, flags5);
	}

	instmem->flush(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
	(*gpuobj)->class = class;
	return 0;
}
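
/* Typical usage, as in nouveau_gpuobj_channel_init() below: a read/write
 * ctxdma spanning all of VRAM on <NV50 cards.
 *
 *   ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 *                                0, dev_priv->fb_available_size,
 *                                NV_DMA_ACCESS_RW,
 *                                NV_DMA_TARGET_VIDMEM, &vram);
 */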

int
nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
			    uint64_t offset, uint64_t size, int access,
			    struct nouveau_gpuobj **gpuobj,
			    uint32_t *o_ret)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
	    (dev_priv->card_type >= NV_50 &&
	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     offset + dev_priv->vm_gart_base,
					     size, access, NV_DMA_TARGET_AGP,
					     gpuobj);
		if (o_ret)
			*o_ret = 0;
	} else
	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
		*gpuobj = dev_priv->gart_info.sg_ctxdma;
		if (offset & ~0xffffffffULL) {
			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
			return -EINVAL;
		}
		if (o_ret)
			*o_ret = (uint32_t)offset;
		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		return -EINVAL;
	}

	return ret;
}

/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.

   NV4 - NV30:

   entry[0]
   11:0  class
   12    chroma key enable
   13    user clip enable
   14    swizzle enable
   17:15 patch config:
         scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
   18    synchronize enable
   19    endian: 1 big, 0 little
   21:20 dither mode
   23    single step enable
   24    patch status: 0 invalid, 1 valid
   25    context_surface 0: 1 valid
   26    context surface 1: 1 valid
   27    context pattern: 1 valid
   28    context rop: 1 valid
   29,30 context beta, beta4
   entry[1]
   7:0   mono format
   15:8  color format
   31:16 notify instance address
   entry[2]
   15:0  dma 0 instance address
   31:16 dma 1 instance address
   entry[3]
   dma method traps

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
   11:0  class (maybe uses more bits here?)
   17    user clip enable
   21:19 patch config
   25    patch status valid ?
   entry[1]:
   15:0  DMA notifier  (maybe 20:0)
   entry[2]:
   15:0  DMA 0 instance (maybe 20:0)
   24    big endian
   entry[3]:
   15:0  DMA 1 instance (maybe 20:0)
   entry[4]:
   entry[5]:
   set to 0?
*/
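
/* An illustration of the NV4-NV30 entry[0] packing, with hypothetical
 * values: class 0x0062 with patch status valid (bit 24) and context
 * surface 0 valid (bit 25) would encode as
 *
 *   entry[0] = 0x0062 | (1 << 24) | (1 << 25) = 0x03000062
 */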
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type >= NV_50) {
		nv_wo32(dev, *gpuobj, 0, class);
		nv_wo32(dev, *gpuobj, 5, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(dev, *gpuobj, 0, 0x00001030);
			nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(dev, *gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				nv_wo32(dev, *gpuobj, 2, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(dev, *gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.flush(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
	(*gpuobj)->class = class;
	return 0;
}

int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv;
	struct nouveau_gpuobj *gpuobj;

	if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;
	dev_priv = chan->dev->dev_private;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	*gpuobj_ret = gpuobj;
	return 0;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *pramin = NULL;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x1000;
	base = 0;

	/* PGRAPH context */

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
		/* PGRAPH context */
		size += 0x70000;
	}
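
	/* For NV50 the additions above total size = 0x7f400 bytes
	 * (0x1000 + 0x1400 + 0x4000 + 0x8000 + 0x1000 + 0x70000),
	 * with base = 0x6000. */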

	NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
		 chan->id, size, base);
	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
				     &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}
	pramin = chan->ramin->gpuobj;

	ret = drm_mm_init(&chan->ramin_heap, pramin->im_pramin->start + base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref_del(dev, &chan->ramin);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	INIT_LIST_HEAD(&chan->ramht_refs);

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	/* Reserve a block of PRAMIN for the channel
	 *XXX: maybe on <NV50 too at some point
	 */
	if (0 || dev_priv->card_type == NV_50) {
		ret = nouveau_gpuobj_channel_init_pramin(chan);
		if (ret) {
			NV_ERROR(dev, "init pramin\n");
			return ret;
		}
	}

	/* NV50 VM
	 * - Allocate per-channel page-directory
	 * - Map GART and VRAM into the channel's address space at the
	 *   locations determined during init.
	 */
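	/* Each page-directory entry is 8 bytes (two 32-bit words) and maps
	 * 512MiB of address space, hence the (base / 512MiB) * 2 word-index
	 * calculations below; a hypothetical base of 0x20000000 would land
	 * at word index 2, i.e. byte offset 8 into vm_pd. */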
	if (dev_priv->card_type >= NV_50) {
		uint32_t vm_offset, pde;

		vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
		vm_offset += chan->ramin->gpuobj->im_pramin->start;

		ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
					      0, &chan->vm_pd, NULL);
		if (ret)
			return ret;
		for (i = 0; i < 0x4000; i += 8) {
			nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
			nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
		}

		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2;
		ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
					     dev_priv->gart_info.sg_ctxdma,
					     &chan->vm_gart_pt);
		if (ret)
			return ret;
		nv_wo32(dev, chan->vm_pd, pde++,
			chan->vm_gart_pt->instance | 0x03);
		nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);

		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2;
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
			ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
						     dev_priv->vm_vram_pt[i],
						     &chan->vm_vram_pt[i]);
			if (ret)
				return ret;

			nv_wo32(dev, chan->vm_pd, pde++,
				chan->vm_vram_pt[i]->instance | 0x61);
			nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
		}

		instmem->flush(dev);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
					     &chan->ramht);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
					     0x8000, 16,
					     NVOBJ_FLAG_ZERO_ALLOC,
					     &chan->ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_VIDMEM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		tt = vram;
	} else
	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RW, &tt, NULL);
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		ret = -EINVAL;
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct list_head *entry, *tmp;
	struct nouveau_gpuobj_ref *ref;
	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramht_refs.next)
		return;

	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);

		nouveau_gpuobj_ref_del(dev, &ref);
	}

	nouveau_gpuobj_ref_del(dev, &chan->ramht);

	nouveau_gpuobj_del(dev, &chan->vm_pd);
	nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
		nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);

	if (chan->ramin_heap.free_stack.next)
		drm_mm_takedown(&chan->ramin_heap);
	if (chan->ramin)
		nouveau_gpuobj_ref_del(dev, &chan->ramin);
}

int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
		if (!dev_priv->susres.ramin_copy)
			return -ENOMEM;

		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
		return 0;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
			continue;

		gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
		if (!gpuobj->im_backing_suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
			gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
	}

	return 0;
}

void
nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;

	if (dev_priv->card_type < NV_50) {
		vfree(dev_priv->susres.ramin_copy);
		dev_priv->susres.ramin_copy = NULL;
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		vfree(gpuobj->im_backing_suspend);
		gpuobj->im_backing_suspend = NULL;
	}
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
		nouveau_gpuobj_suspend_cleanup(dev);
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
			nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
		dev_priv->engine.instmem.flush(dev);
	}

	nouveau_gpuobj_suspend_cleanup(dev);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_pgraph_object_class *grc;
	struct nouveau_gpuobj *gr = NULL;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);

	if (init->handle == ~0)
		return -EINVAL;

	grc = pgraph->grclass;
	while (grc->id) {
		if (grc->id == init->class)
			break;
		grc++;
	}

	if (!grc->id) {
		NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
		return -EPERM;
	}

	if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
		return -EEXIST;

	if (!grc->software)
		ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
	else
		ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);

	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		return ret;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		nouveau_gpuobj_del(dev, &gr);
		return ret;
	}

	return 0;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_gpuobj_ref *ref;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);

	ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
	if (ret)
		return ret;
	nouveau_gpuobj_ref_del(dev, &ref);

	return 0;
}