/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
struct nv50_fifo_priv {
	struct nouveau_gpuobj_ref *thingo[2];
	int cur_thingo;
};

#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
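
/* Rebuild the "thingo" (what later nouveau code calls the playlist): a
 * table of runnable channel IDs that PFIFO's scheduler walks.  The two
 * buffers are used alternately, presumably so the hardware can keep
 * reading the old list while a new one is being written.
 */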
static void
nv50_fifo_init_thingo(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
	struct nouveau_gpuobj_ref *cur;
	int i, nr;

	NV_DEBUG(dev, "\n");

	cur = priv->thingo[priv->cur_thingo];
	priv->cur_thingo = !priv->cur_thingo;

	/* We never schedule channel 0 or 127 */
	for (i = 1, nr = 0; i < 127; i++) {
		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
			nv_wo32(dev, cur->gpuobj, nr++, i);
	}
	dev_priv->engine.instmem.flush(dev);

	nv_wr32(dev, 0x32f4, cur->instance >> 12);
	nv_wr32(dev, 0x32ec, nr);
	nv_wr32(dev, 0x2500, 0x101);
}
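
/* A channel is enabled by pointing its entry in the PFIFO context table
 * at the channel's RAMFC.  The instance address is shifted by 12 on G80
 * but by 8 on G84+, matching the different RAMFC alignments used below
 * in nv50_fifo_create_context().
 */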
static int
nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->fifos[channel];
	uint32_t inst;

	NV_DEBUG(dev, "ch%d\n", channel);

	if (!chan->ramfc)
		return -EINVAL;

	if (IS_G80)
		inst = chan->ramfc->instance >> 12;
	else
		inst = chan->ramfc->instance >> 8;
	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel),
		inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);

	if (!nt)
		nv50_fifo_init_thingo(dev);
	return 0;
}
static void
nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;

	NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt);

	if (IS_G80)
		inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
	else
		inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);

	if (!nt)
		nv50_fifo_init_thingo(dev);
}
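
/* Reset PFIFO by pulsing its bit in PMC_ENABLE low and then high again. */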
static void
nv50_fifo_init_reset(struct drm_device *dev)
{
	uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;

	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |  pmc_e);
}
static void
nv50_fifo_init_intr(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
}
static void
nv50_fifo_init_context_table(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	NV_DEBUG(dev, "\n");

	for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
		if (dev_priv->fifos[i])
			nv50_fifo_channel_enable(dev, i, true);
		else
			nv50_fifo_channel_disable(dev, i, true);
	}

	nv50_fifo_init_thingo(dev);
}
static void
nv50_fifo_init_regs__nv(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x250c, 0x6f3cfc34);
}
static void
nv50_fifo_init_regs(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x2500, 0);
	nv_wr32(dev, 0x3250, 0);
	nv_wr32(dev, 0x3220, 0);
	nv_wr32(dev, 0x3204, 0);
	nv_wr32(dev, 0x3210, 0);
	nv_wr32(dev, 0x3270, 0);

	/* Enable dummy channels setup by nv50_instmem.c */
	nv50_fifo_channel_enable(dev, 0, true);
	nv50_fifo_channel_enable(dev, 127, true);
}
int
nv50_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv;
	int ret;

	NV_DEBUG(dev, "\n");

	priv = dev_priv->engine.fifo.priv;
	if (priv) {
		priv->cur_thingo = !priv->cur_thingo;
		goto just_reset;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_priv->engine.fifo.priv = priv;

	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
				     NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]);
	if (ret) {
		NV_ERROR(dev, "error creating thingo0: %d\n", ret);
		return ret;
	}

	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
				     NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]);
	if (ret) {
		NV_ERROR(dev, "error creating thingo1: %d\n", ret);
		return ret;
	}

just_reset:
	nv50_fifo_init_reset(dev);
	nv50_fifo_init_intr(dev);
	nv50_fifo_init_context_table(dev);
	nv50_fifo_init_regs__nv(dev);
	nv50_fifo_init_regs(dev);
	dev_priv->engine.fifo.enable(dev);
	dev_priv->engine.fifo.reassign(dev, true);

	return 0;
}
void
nv50_fifo_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;

	NV_DEBUG(dev, "\n");

	if (!priv)
		return;

	nouveau_gpuobj_ref_del(dev, &priv->thingo[0]);
	nouveau_gpuobj_ref_del(dev, &priv->thingo[1]);

	dev_priv->engine.fifo.priv = NULL;
	kfree(priv);
}
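
/* PFIFO keeps the ID of the channel currently bound to CACHE1 in the low
 * bits of CACHE1_PUSH1.
 */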
int
nv50_fifo_channel_id(struct drm_device *dev)
{
	return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
			NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
}
int
nv50_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramfc = NULL;
	unsigned long flags;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (IS_G80) {
		uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
		uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;

		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset,
					      0x100, NVOBJ_FLAG_ZERO_ALLOC |
					      NVOBJ_FLAG_ZERO_FREE, &ramfc,
					      &chan->ramfc);
		if (ret)
			return ret;

		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400,
					      ramin_voffset + 0x0400, 4096,
					      0, NULL, &chan->cache);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
					     NVOBJ_FLAG_ZERO_ALLOC |
					     NVOBJ_FLAG_ZERO_FREE,
					     &chan->ramfc);
		if (ret)
			return ret;
		ramfc = chan->ramfc->gpuobj;

		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
					     0, &chan->cache);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
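
	/* Initial RAMFC image.  Known fields: 0x48 is the pushbuf DMA
	 * object, 0x80 the RAMHT instance, 0x50/0x54 the IB ring base and
	 * log2 size.  The remaining magic values were presumably lifted
	 * from traces of the binary driver.
	 */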
	nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
	nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
	nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
	nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
	nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
	nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
	nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
	nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078);
	nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base +
				    chan->dma.ib_base * 4);
	nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16);
	if (!IS_G80) {
		nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
		nv_wo32(dev, chan->ramin->gpuobj, 1,
			chan->ramfc->instance >> 8);

		nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10);
		nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
	}
	dev_priv->engine.instmem.flush(dev);
	ret = nv50_fifo_channel_enable(dev, chan->id, false);
	if (ret) {
		NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
		spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
		return ret;
	}

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
	return 0;
}
void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_ref *ramfc = chan->ramfc;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* This will ensure the channel is seen as disabled. */
	chan->ramfc = NULL;
	nv50_fifo_channel_disable(dev, chan->id, false);

	/* Dummy channel, also used on ch 127 */
	if (chan->id == 0)
		nv50_fifo_channel_disable(dev, 127, false);

	nouveau_gpuobj_ref_del(dev, &ramfc);
	nouveau_gpuobj_ref_del(dev, &chan->cache);
}
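
/* Load a channel's saved RAMFC image into the live PFIFO context
 * registers.  nv50_fifo_unload_context() below does the exact reverse,
 * so the register lists in the two functions must stay in sync.
 */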
int
nv50_fifo_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
	struct nouveau_gpuobj *cache = chan->cache->gpuobj;
	int ptr, cnt;

	NV_DEBUG(dev, "ch%d\n", chan->id);
	nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
	nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
	nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
	nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4));
	nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4));
	nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4));
	nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4));
	nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4));
	nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4));
	nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4));
	nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4));
	nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4));
	nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4));
	nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4));
	nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4));
	nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4));
	nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4));
	nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4));
	nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4));
	nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4));
	nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4));
	nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4));
	nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4));
	nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4));
	nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4));
	nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4));
	nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4));
	nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4));
	nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4));
	nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4));
	nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4));
	nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4));
	nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4));
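
	/* Replay the methods saved into the channel's CACHE1 shadow at
	 * unload time; RAMFC +0x84 holds the saved entry count.
	 */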
	cnt = nv_ro32(dev, ramfc, 0x84/4);
	for (ptr = 0; ptr < cnt; ptr++) {
		nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
			nv_ro32(dev, cache, (ptr * 2) + 0));
		nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
			nv_ro32(dev, cache, (ptr * 2) + 1));
	}
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
	/* guessing that all the 0x34xx regs aren't on NV50 */
	if (!IS_G80) {
		nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
		nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
		nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
		nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4));
		nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
	}
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
	return 0;
}
int
nv50_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_gpuobj *ramfc, *cache;
	struct nouveau_channel *chan = NULL;
	int chid, get, put, ptr;

	NV_DEBUG(dev, "\n");

	chid = pfifo->channel_id(dev);
	if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
		return 0;

	chan = dev_priv->fifos[chid];
	if (!chan) {
		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
		return -EINVAL;
	}
	NV_DEBUG(dev, "ch%d\n", chan->id);
	ramfc = chan->ramfc->gpuobj;
	cache = chan->cache->gpuobj;
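
	/* Dump the live PFIFO context registers back into the channel's
	 * RAMFC image; same register list as nv50_fifo_load_context().
	 */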
	nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
	nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
	nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
	nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320));
	nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244));
	nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328));
	nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368));
	nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c));
	nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370));
	nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374));
	nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378));
	nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c));
	nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228));
	nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364));
	nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0));
	nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224));
	nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c));
	nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044));
	nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c));
	nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234));
	nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340));
	nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344));
	nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280));
	nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254));
	nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260));
	nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264));
	nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268));
	nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c));
	nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4));
	nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248));
	nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088));
	nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058));
	nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210));
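
	/* Drain the method/data pairs still queued in CACHE1 into the
	 * channel's cache object so they can be replayed on the next
	 * load; the wrap mask suggests CACHE1 holds up to 512 entries.
	 */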
	put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
	get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
	ptr = 0;
	while (put != get) {
		nv_wo32(dev, cache, ptr++,
			nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
		nv_wo32(dev, cache, ptr++,
			nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
		get = (get + 1) & 0x1ff;
	}
	/* guessing that all the 0x34xx regs aren't on NV50 */
	if (!IS_G80) {
		nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
		nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
		nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
		nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404));
		nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408));
		nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
	}
	dev_priv->engine.instmem.flush(dev);

	/*XXX: probably reload ch127 (NULL) state back too */
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
	return 0;
}