/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_grctx.h"

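/* Determine which channel's graphics context, if any, is currently
 * resident in PGRAPH.
 */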
struct nouveau_channel *
nv40_graph_channel(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;
	int i;

	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
		return NULL;
	inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;

	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		struct nouveau_channel *chan = dev_priv->fifos[i];

		if (chan && chan->ramin_grctx &&
		    chan->ramin_grctx->pinst == inst)
			return chan;
	}

	return NULL;
}

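/* Allocate a PGRAPH context object for the channel and fill it with the
 * default context values generated by nv40_grctx_init().
 */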
int
nv40_graph_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_grctx ctx = {};
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
	if (ret)
		return ret;

	/* Initialise default context values */
	ctx.dev = chan->dev;
	ctx.mode = NOUVEAU_GRCTX_VALS;
	ctx.data = chan->ramin_grctx;
	nv40_grctx_init(&ctx);

	nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
	return 0;
}

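/* Release the channel's reference to its PGRAPH context object. */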
void
nv40_graph_destroy_context(struct nouveau_channel *chan)
{
	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}

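/* Have the context-switching microcode save or load the PGRAPH context
 * at the given instance address, and wait for the transfer to finish.
 */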
static int
nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
{
	uint32_t old_cp, tv = 1000, tmp;
	int i;

	old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);

	tmp  = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
	tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
		      NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);

	tmp  = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
	tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);

	nouveau_wait_for_idle(dev);

	for (i = 0; i < tv; i++) {
		if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
			break;
	}

	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);

	if (i == tv) {
		uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
		NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
		NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
			 ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
			 ucstat  & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
		NV_ERROR(dev, "0x40030C = 0x%08x\n",
			 nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
		return -EBUSY;
	}

	return 0;
}

/* Restore the context for a specific channel into PGRAPH */
int
nv40_graph_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	uint32_t inst;
	int ret;

	if (!chan->ramin_grctx)
		return -EINVAL;
	inst = chan->ramin_grctx->pinst >> 4;

	ret = nv40_graph_transfer_context(dev, inst, 0);
	if (ret)
		return ret;

	/* 0x40032C, no idea of its exact function. Could simply be a
	 * record of the currently active PGRAPH context. It's currently
	 * unknown as to what bit 24 does. The nv ddx has it set, so we will
	 * set it here too.
	 */
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR,
		(inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) |
		 NV40_PGRAPH_CTXCTL_CUR_LOADED);
	/* 0x32E0 records the instance address of the active FIFO's PGRAPH
	 * context. If at any time this doesn't match 0x40032C, you will
	 * receive PGRAPH_INTR_CONTEXT_SWITCH
	 */
	nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst);
	return 0;
}

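/* Save the currently loaded context back to instance memory and mark
 * PGRAPH as having no context loaded.
 */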
int
nv40_graph_unload_context(struct drm_device *dev)
{
	uint32_t inst;
	int ret;

	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
		return 0;
	inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;

	ret = nv40_graph_transfer_context(dev, inst, 1);

	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
	return ret;
}

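/* Program PGRAPH's view of tiling region i; which register sets need
 * updating depends on the chipset.
 */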
void
nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
			     uint32_t size, uint32_t pitch)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t limit = max(1u, addr + size) - 1;

	if (pitch)
		addr |= 1;

	switch (dev_priv->chipset) {
	case 0x44:
	case 0x4a:
	case 0x4e:
		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
		break;

	case 0x46:
	case 0x47:
	case 0x49:
	case 0x4b:
		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
		nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
		break;

	default:
		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
		break;
	}
}

/*
 * G70      0x47
 * G71      0x49
 * NV45     0x48
 * G72[M]   0x46
 * G73      0x4b
 * C51_G7X  0x4c
 * C51      0x4e
 */
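/* One-time PGRAPH engine initialisation: reset the engine, upload the
 * context-switching microcode and program the initial engine state.
 */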
int
nv40_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv =
		(struct drm_nouveau_private *)dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_grctx ctx = {};
	uint32_t vramsz, *cp;
	int i, j;

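	/* Reset PGRAPH by toggling its bit in PMC_ENABLE */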
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
			~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			 NV_PMC_ENABLE_PGRAPH);

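	/* Generate the context-switching microcode for this chipset and
	 * upload it to PGRAPH.
	 */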
	cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	ctx.dev = dev;
	ctx.mode = NOUVEAU_GRCTX_PROG;
	ctx.data = cp;
	ctx.ctxprog_max = 256;
	nv40_grctx_init(&ctx);
	dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;

	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
	for (i = 0; i < ctx.ctxprog_len; i++)
		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);

	kfree(cp);

	/* No context present currently */
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);

	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	j = nv_rd32(dev, 0x1540) & 0xff;
	if (j) {
		for (i = 0; !(j & 1); j >>= 1, i++)
			;
		nv_wr32(dev, 0x405000, i);
	}

	if (dev_priv->chipset == 0x40) {
		nv_wr32(dev, 0x4009b0, 0x83280fff);
		nv_wr32(dev, 0x4009b4, 0x000000a0);
	} else {
		nv_wr32(dev, 0x400820, 0x83280eff);
		nv_wr32(dev, 0x400824, 0x000000a0);
	}

	switch (dev_priv->chipset) {
	case 0x40:
	case 0x45:
		nv_wr32(dev, 0x4009b8, 0x0078e366);
		nv_wr32(dev, 0x4009bc, 0x0000014c);
		break;
	case 0x41:
	case 0x42: /* pciid also 0x00Cx */
	/* case 0x0120: XXX (pciid) */
		nv_wr32(dev, 0x400828, 0x007596ff);
		nv_wr32(dev, 0x40082c, 0x00000108);
		break;
	case 0x43:
		nv_wr32(dev, 0x400828, 0x0072cb77);
		nv_wr32(dev, 0x40082c, 0x00000108);
		break;
	case 0x44:
	case 0x46: /* G72 */
	case 0x4a:
	case 0x4c: /* G7x-based C51 */
	case 0x4e:
		nv_wr32(dev, 0x400860, 0);
		nv_wr32(dev, 0x400864, 0);
		break;
	case 0x47: /* G70 */
	case 0x49: /* G71 */
	case 0x4b: /* G73 */
		nv_wr32(dev, 0x400828, 0x07830610);
		nv_wr32(dev, 0x40082c, 0x0000016A);
		break;
	default:
		break;
	}

	nv_wr32(dev, 0x400b38, 0x2ffff800);
	nv_wr32(dev, 0x400b3c, 0x00006000);

	/* Tiling related stuff. */
	switch (dev_priv->chipset) {
	case 0x44:
	case 0x4a:
		nv_wr32(dev, 0x400bc4, 0x1003d888);
		nv_wr32(dev, 0x400bbc, 0xb7a7b500);
		break;
	case 0x46:
		nv_wr32(dev, 0x400bc4, 0x0000e024);
		nv_wr32(dev, 0x400bbc, 0xb7a7b520);
		break;
	case 0x4c:
	case 0x4e:
	case 0x67:
		nv_wr32(dev, 0x400bc4, 0x1003d888);
		nv_wr32(dev, 0x400bbc, 0xb7a7b540);
		break;
	default:
		break;
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->num_tiles; i++)
		nv40_graph_set_region_tiling(dev, i, 0, 0, 0);

	/* begin RAM config */
	vramsz = pci_resource_len(dev->pdev, 0) - 1;
	switch (dev_priv->chipset) {
	case 0x40:
		nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
		nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
		nv_wr32(dev, 0x400820, 0);
		nv_wr32(dev, 0x400824, 0);
		nv_wr32(dev, 0x400864, vramsz);
		nv_wr32(dev, 0x400868, vramsz);
		break;
	default:
		switch (dev_priv->chipset) {
		case 0x46:
		case 0x47:
		case 0x49:
		case 0x4b:
			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
			break;
		default:
			nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
			nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
			break;
		}
		nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
		nv_wr32(dev, 0x400840, 0);
		nv_wr32(dev, 0x400844, 0);
		nv_wr32(dev, 0x4008A0, vramsz);
		nv_wr32(dev, 0x4008A4, vramsz);
		break;
	}

	return 0;
}

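/* No engine-specific teardown is required; per-channel context objects
 * are released by nv40_graph_destroy_context().
 */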
void nv40_graph_takedown(struct drm_device *dev)
{
}

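/* Graphics object classes handled by NV40-family PGRAPH */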
struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
	{ 0x0030, false, NULL }, /* null */
	{ 0x0039, false, NULL }, /* m2mf */
	{ 0x004a, false, NULL }, /* gdirect */
	{ 0x009f, false, NULL }, /* imageblit (nv12) */
	{ 0x008a, false, NULL }, /* ifc */
	{ 0x0089, false, NULL }, /* sifm */
	{ 0x3089, false, NULL }, /* sifm (nv40) */
	{ 0x0062, false, NULL }, /* surf2d */
	{ 0x3062, false, NULL }, /* surf2d (nv40) */
	{ 0x0043, false, NULL }, /* rop */
	{ 0x0012, false, NULL }, /* beta1 */
	{ 0x0072, false, NULL }, /* beta4 */
	{ 0x0019, false, NULL }, /* cliprect */
	{ 0x0044, false, NULL }, /* pattern */
	{ 0x309e, false, NULL }, /* swzsurf */
	{ 0x4097, false, NULL }, /* curie (nv40) */
	{ 0x4497, false, NULL }, /* curie (nv44) */
	{}
};