]>
Commit | Line | Data |
---|---|---|
6ee73861 BS |
1 | /* |
2 | * Copyright (C) 2007 Ben Skeggs. | |
3 | * All Rights Reserved. | |
4 | * | |
5 | * Permission is hereby granted, free of charge, to any person obtaining | |
6 | * a copy of this software and associated documentation files (the | |
7 | * "Software"), to deal in the Software without restriction, including | |
8 | * without limitation the rights to use, copy, modify, merge, publish, | |
9 | * distribute, sublicense, and/or sell copies of the Software, and to | |
10 | * permit persons to whom the Software is furnished to do so, subject to | |
11 | * the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice (including the | |
14 | * next paragraph) shall be included in all copies or substantial | |
15 | * portions of the Software. | |
16 | * | |
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | |
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | |
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | |
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | |
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |
24 | * | |
25 | */ | |
26 | ||
27 | #include "drmP.h" | |
28 | #include "drm.h" | |
29 | #include "nouveau_drv.h" | |
30 | ||
/* Per-device PFIFO state: two playlist ("thingo") buffers used in a
 * double-buffered fashion when rebuilding the channel run list.
 */
struct nv50_fifo_priv {
	struct nouveau_gpuobj_ref *thingo[2]; /* channel playlist buffers */
	int cur_thingo;                       /* index of buffer the HW currently uses */
};

/* True on original G80 (chipset 0x50); G84+ use different RAMFC layouts.
 * NOTE: expects a local 'dev_priv' in the expanding scope.
 */
#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
37 | ||
/* Rebuild the PFIFO channel playlist and hand it to the hardware.
 *
 * Writes the IDs of all channels that have a RAMFC into the playlist
 * buffer NOT currently in use, flips buffers, then points PFIFO at the
 * new list and triggers a run-list update.
 */
static void
nv50_fifo_init_thingo(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
	struct nouveau_gpuobj_ref *cur;
	int i, nr;

	NV_DEBUG(dev, "\n");

	/* Build into the idle buffer, then make it the active one. */
	cur = priv->thingo[priv->cur_thingo];
	priv->cur_thingo = !priv->cur_thingo;

	/* We never schedule channel 0 or 127 */
	for (i = 1, nr = 0; i < 127; i++) {
		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
			nv_wo32(dev, cur->gpuobj, nr++, i);
	}
	/* Make the playlist writes visible before the HW reads them. */
	dev_priv->engine.instmem.flush(dev);

	nv_wr32(dev, 0x32f4, cur->instance >> 12); /* playlist address */
	nv_wr32(dev, 0x32ec, nr);                  /* number of entries */
	nv_wr32(dev, 0x2500, 0x101);               /* kick the update */
}
62 | ||
63 | static int | |
64 | nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt) | |
65 | { | |
66 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
67 | struct nouveau_channel *chan = dev_priv->fifos[channel]; | |
68 | uint32_t inst; | |
69 | ||
70 | NV_DEBUG(dev, "ch%d\n", channel); | |
71 | ||
72 | if (!chan->ramfc) | |
73 | return -EINVAL; | |
74 | ||
75 | if (IS_G80) | |
76 | inst = chan->ramfc->instance >> 12; | |
77 | else | |
78 | inst = chan->ramfc->instance >> 8; | |
79 | nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), | |
80 | inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); | |
81 | ||
82 | if (!nt) | |
83 | nv50_fifo_init_thingo(dev); | |
84 | return 0; | |
85 | } | |
86 | ||
87 | static void | |
88 | nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt) | |
89 | { | |
90 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
91 | uint32_t inst; | |
92 | ||
93 | NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt); | |
94 | ||
95 | if (IS_G80) | |
96 | inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80; | |
97 | else | |
98 | inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84; | |
99 | nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst); | |
100 | ||
101 | if (!nt) | |
102 | nv50_fifo_init_thingo(dev); | |
103 | } | |
104 | ||
105 | static void | |
106 | nv50_fifo_init_reset(struct drm_device *dev) | |
107 | { | |
108 | uint32_t pmc_e = NV_PMC_ENABLE_PFIFO; | |
109 | ||
110 | NV_DEBUG(dev, "\n"); | |
111 | ||
112 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e); | |
113 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e); | |
114 | } | |
115 | ||
116 | static void | |
117 | nv50_fifo_init_intr(struct drm_device *dev) | |
118 | { | |
119 | NV_DEBUG(dev, "\n"); | |
120 | ||
121 | nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF); | |
122 | nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); | |
123 | } | |
124 | ||
125 | static void | |
126 | nv50_fifo_init_context_table(struct drm_device *dev) | |
127 | { | |
128 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
129 | int i; | |
130 | ||
131 | NV_DEBUG(dev, "\n"); | |
132 | ||
133 | for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) { | |
134 | if (dev_priv->fifos[i]) | |
135 | nv50_fifo_channel_enable(dev, i, true); | |
136 | else | |
137 | nv50_fifo_channel_disable(dev, i, true); | |
138 | } | |
139 | ||
140 | nv50_fifo_init_thingo(dev); | |
141 | } | |
142 | ||
143 | static void | |
144 | nv50_fifo_init_regs__nv(struct drm_device *dev) | |
145 | { | |
146 | NV_DEBUG(dev, "\n"); | |
147 | ||
148 | nv_wr32(dev, 0x250c, 0x6f3cfc34); | |
149 | } | |
150 | ||
151 | static void | |
152 | nv50_fifo_init_regs(struct drm_device *dev) | |
153 | { | |
154 | NV_DEBUG(dev, "\n"); | |
155 | ||
156 | nv_wr32(dev, 0x2500, 0); | |
157 | nv_wr32(dev, 0x3250, 0); | |
158 | nv_wr32(dev, 0x3220, 0); | |
159 | nv_wr32(dev, 0x3204, 0); | |
160 | nv_wr32(dev, 0x3210, 0); | |
161 | nv_wr32(dev, 0x3270, 0); | |
162 | ||
163 | /* Enable dummy channels setup by nv50_instmem.c */ | |
164 | nv50_fifo_channel_enable(dev, 0, true); | |
165 | nv50_fifo_channel_enable(dev, 127, true); | |
166 | } | |
167 | ||
/* Bring up the PFIFO engine.
 *
 * First call: allocates the private state and both playlist buffers,
 * then programs the hardware.  Subsequent calls (resume path, priv
 * already present): flips the playlist buffer and jumps straight to the
 * hardware re-init.
 *
 * Returns 0 on success or a negative errno on allocation failure.
 */
int
nv50_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv;
	int ret;

	NV_DEBUG(dev, "\n");

	priv = dev_priv->engine.fifo.priv;
	if (priv) {
		/* Resume: just flip buffers and reprogram the HW. */
		priv->cur_thingo = !priv->cur_thingo;
		goto just_reset;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	/* Installed before the thingo allocations so that
	 * nv50_fifo_takedown() can clean up on any failure below.
	 */
	dev_priv->engine.fifo.priv = priv;

	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
				     NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]);
	if (ret) {
		NV_ERROR(dev, "error creating thingo0: %d\n", ret);
		return ret;
	}

	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
				     NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]);
	if (ret) {
		NV_ERROR(dev, "error creating thingo1: %d\n", ret);
		return ret;
	}

just_reset:
	nv50_fifo_init_reset(dev);
	nv50_fifo_init_intr(dev);
	nv50_fifo_init_context_table(dev);
	nv50_fifo_init_regs__nv(dev);
	nv50_fifo_init_regs(dev);
	dev_priv->engine.fifo.enable(dev);
	dev_priv->engine.fifo.reassign(dev, true);

	return 0;
}
213 | ||
214 | void | |
215 | nv50_fifo_takedown(struct drm_device *dev) | |
216 | { | |
217 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
218 | struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv; | |
219 | ||
220 | NV_DEBUG(dev, "\n"); | |
221 | ||
222 | if (!priv) | |
223 | return; | |
224 | ||
225 | nouveau_gpuobj_ref_del(dev, &priv->thingo[0]); | |
226 | nouveau_gpuobj_ref_del(dev, &priv->thingo[1]); | |
227 | ||
228 | dev_priv->engine.fifo.priv = NULL; | |
229 | kfree(priv); | |
230 | } | |
231 | ||
232 | int | |
233 | nv50_fifo_channel_id(struct drm_device *dev) | |
234 | { | |
235 | return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & | |
236 | NV50_PFIFO_CACHE1_PUSH1_CHID_MASK; | |
237 | } | |
238 | ||
/* Allocate and initialise the fifo context (RAMFC + CACHE1 backup) for
 * a channel, then enable it on the hardware.
 *
 * On G80 the RAMFC/cache live at fixed offsets inside the channel's
 * RAMIN and are wrapped with "fake" gpuobjs; on G84+ they are regular
 * allocations.  The RAMFC register writes are done under the context
 * switch lock so a concurrent context switch cannot observe a
 * half-initialised block.
 *
 * Returns 0 on success or a negative errno.
 */
int
nv50_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramfc = NULL;
	unsigned long flags;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (IS_G80) {
		/* RAMFC at +0x0, CACHE at +0x400 inside the channel's RAMIN. */
		uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
		uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;

		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset,
					      0x100, NVOBJ_FLAG_ZERO_ALLOC |
					      NVOBJ_FLAG_ZERO_FREE, &ramfc,
					      &chan->ramfc);
		if (ret)
			return ret;

		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400,
					      ramin_voffset + 0x0400, 4096,
					      0, NULL, &chan->cache);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
					     NVOBJ_FLAG_ZERO_ALLOC |
					     NVOBJ_FLAG_ZERO_FREE,
					     &chan->ramfc);
		if (ret)
			return ret;
		ramfc = chan->ramfc->gpuobj;

		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
					     0, &chan->cache);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	/* Initial RAMFC contents; offsets are in bytes (nv_wo32 takes a
	 * dword index, hence the /4).  Unnamed constants are values the
	 * hardware expects at those slots.
	 */
	nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
	nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
	nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
	nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
	nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
	nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
	nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
	nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078);
	/* Indirect buffer location and log2 size. */
	nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base +
				    chan->dma.ib_base * 4);
	nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16);

	if (!IS_G80) {
		/* G84+: channel id and RAMFC location also go into RAMIN. */
		nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
		nv_wo32(dev, chan->ramin->gpuobj, 1,
			chan->ramfc->instance >> 8);

		nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10);
		nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
	}

	dev_priv->engine.instmem.flush(dev);

	ret = nv50_fifo_channel_enable(dev, chan->id, false);
	if (ret) {
		NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
		/* NOTE(review): chan->cache is left for the channel
		 * destroy path to release; only ramfc is dropped here.
		 */
		spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
		return ret;
	}

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
	return 0;
}
317 | ||
/* Disable a channel on the hardware and release its fifo context
 * objects (RAMFC and CACHE1 backup).
 */
void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	/* Keep a local ref so we can free RAMFC after clearing the
	 * channel pointer below.
	 */
	struct nouveau_gpuobj_ref *ramfc = chan->ramfc;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* This will ensure the channel is seen as disabled. */
	/* (init_thingo skips channels whose ramfc is NULL, so the playlist
	 * rebuild triggered by channel_disable won't schedule us.)
	 */
	chan->ramfc = NULL;
	nv50_fifo_channel_disable(dev, chan->id, false);

	/* Dummy channel, also used on ch 127 */
	if (chan->id == 0)
		nv50_fifo_channel_disable(dev, 127, false);

	nouveau_gpuobj_ref_del(dev, &ramfc);
	nouveau_gpuobj_ref_del(dev, &chan->cache);
}
337 | ||
/* Load a channel's saved fifo state from RAMFC back into the PFIFO
 * registers and replay its CACHE1 method/data entries, then make the
 * channel current on CACHE1.
 *
 * The register list is the exact inverse of nv50_fifo_unload_context();
 * the two must be kept in sync.  Returns 0.
 */
int
nv50_fifo_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
	struct nouveau_gpuobj *cache = chan->cache->gpuobj;
	int ptr, cnt;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* RAMFC dword N -> corresponding PFIFO register. */
	nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
	nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
	nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
	nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4));
	nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4));
	nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4));
	nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4));
	nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4));
	nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4));
	nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4));
	nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4));
	nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4));
	nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4));
	nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4));
	nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4));
	nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4));
	nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4));
	nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4));
	nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4));
	nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4));
	nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4));
	nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4));
	nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4));
	nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4));
	nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4));
	nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4));
	nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4));
	nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4));
	nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4));
	nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4));
	nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4));
	nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4));
	nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4));

	/* Replay saved CACHE1 entries (method/data pairs); the count was
	 * stored at 0x84 by unload_context.
	 */
	cnt = nv_ro32(dev, ramfc, 0x84/4);
	for (ptr = 0; ptr < cnt; ptr++) {
		nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
			nv_ro32(dev, cache, (ptr * 2) + 0));
		nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
			nv_ro32(dev, cache, (ptr * 2) + 1));
	}
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);

	/* guessing that all the 0x34xx regs aren't on NV50 */
	if (!IS_G80) {
		nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
		nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
		nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
		nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4));
		nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
	}

	/* Make this channel current; bit 16 marks it valid. */
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
	return 0;
}
405 | ||
/* Save the currently-active channel's fifo state from the PFIFO
 * registers into its RAMFC, drain CACHE1 into the channel's cache
 * object, then switch PFIFO to the idle dummy channel (127).
 *
 * The register list is the exact inverse of nv50_fifo_load_context();
 * the two must be kept in sync.
 *
 * Returns 0 if nothing needed saving or on success, -EINVAL if the
 * active channel id has no channel structure.
 */
int
nv50_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_gpuobj *ramfc, *cache;
	struct nouveau_channel *chan = NULL;
	int chid, get, put, ptr;

	NV_DEBUG(dev, "\n");

	/* Channels 0 and channels-1 (127) are dummies; nothing to save. */
	chid = pfifo->channel_id(dev);
	if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
		return 0;

	chan = dev_priv->fifos[chid];
	if (!chan) {
		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
		return -EINVAL;
	}
	NV_DEBUG(dev, "ch%d\n", chan->id);
	ramfc = chan->ramfc->gpuobj;
	cache = chan->cache->gpuobj;

	/* PFIFO register -> corresponding RAMFC dword. */
	nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
	nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
	nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
	nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320));
	nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244));
	nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328));
	nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368));
	nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c));
	nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370));
	nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374));
	nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378));
	nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c));
	nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228));
	nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364));
	nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0));
	nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224));
	nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c));
	nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044));
	nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c));
	nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234));
	nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340));
	nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344));
	nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280));
	nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254));
	nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260));
	nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264));
	nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268));
	nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c));
	nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4));
	nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248));
	nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088));
	nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058));
	nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210));

	/* Drain unprocessed CACHE1 method/data pairs between GET and PUT
	 * into the cache object so load_context can replay them.
	 */
	put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
	get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
	ptr = 0;
	while (put != get) {
		nv_wo32(dev, cache, ptr++,
			nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
		nv_wo32(dev, cache, ptr++,
			nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
		get = (get + 1) & 0x1ff; /* CACHE1 has 512 entries */
	}

	/* guessing that all the 0x34xx regs aren't on NV50 */
	if (!IS_G80) {
		/* Entry count (pairs) stored at 0x84 for load_context. */
		nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
		nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
		nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
		nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404));
		nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408));
		nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
	}

	dev_priv->engine.instmem.flush(dev);

	/*XXX: probably reload ch127 (NULL) state back too */
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
	return 0;
}
491 |