/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_drm.h"
#include "radeon_share.h"
#include "r100_track.h"
#include "r300d.h"

#include "r300_reg_safe.h"

/* r300,r350,rv350,rv370,rv380 depend on: */
void r100_hdp_reset(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
int r100_rb2d_reset(struct radeon_device *rdev);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
void r100_mc_setup(struct radeon_device *rdev);
void r100_mc_disable_clients(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx);
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check);
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					 struct radeon_cs_packet *pkt,
					 struct radeon_object *robj);

/* This file gathers functions specific to:
 * r300,r350,rv350,rv370,rv380
 *
 * Some of these functions might be used by newer ASICs.
 */
void r300_gpu_init(struct radeon_device *rdev);
int r300_mc_wait_for_idle(struct radeon_device *rdev);
int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);


/*
 * rv370,rv380 PCIE GART
 */
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Workaround for a HW bug: flush twice */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
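		/* posting read: force the invalidate write to reach the
		 * device before the control register is restored */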
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	}
	mb();
}

int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	r = rv370_debugfs_pcie_gart_info_init(rdev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for PCIE gart!\n");
	}
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	r = radeon_gart_table_vram_alloc(rdev);
	if (r) {
		return r;
	}
	/* discard memory requests outside of the configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	/* Clear error */
	WREG32_PCIE(0x18, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	if (rdev->gart.table.vram.robj) {
		radeon_object_kunmap(rdev->gart.table.vram.robj);
		radeon_object_unpin(rdev->gart.table.vram.robj);
	}
}

int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
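	/* PTE layout, as inferred from the encoding below (not from a
	 * register spec): bits [23:0] hold address bits [31:8], bits
	 * [31:24] hold address bits [39:32], and the low nibble (0xc)
	 * carries the entry's flag bits */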
	addr = (lower_32_bits(addr) >> 8) |
	       ((upper_32_bits(addr) & 0xff) << 24) |
	       0xc;
	/* on x86 we want this to be CPU endian; on powerpc without HW
	 * swappers it'll get swapped on the way into VRAM - so there is
	 * no need for cpu_to_le32 on VRAM tables */
	writel(addr, ((void __iomem *)ptr) + (i * 4));
	return 0;
}

int r300_gart_enable(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->family > CHIP_RV350) {
			rv370_pcie_gart_disable(rdev);
		} else {
			r100_pci_gart_disable(rdev);
		}
		return 0;
	}
#endif
	if (rdev->flags & RADEON_IS_PCIE) {
		rdev->asic->gart_disable = &rv370_pcie_gart_disable;
		rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
		rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
		return rv370_pcie_gart_enable(rdev);
	}
	if (rdev->flags & RADEON_IS_PCI) {
		rdev->asic->gart_disable = &r100_pci_gart_disable;
		rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
		rdev->asic->gart_set_page = &r100_pci_gart_set_page;
		return r100_pci_gart_enable(rdev);
	}
	return r100_pci_gart_enable(rdev);
}


/*
 * MC
 */
int r300_mc_init(struct radeon_device *rdev)
{
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM!\n");
	}

	r300_gpu_init(rdev);
	r100_pci_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE) {
		rv370_pcie_gart_disable(rdev);
	}

	/* Setup GPU memory space */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	r100_mc_disable_clients(rdev);
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	r100_mc_setup(rdev);
	return 0;
}

void r300_mc_fini(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_PCIE) {
		rv370_pcie_gart_disable(rdev);
		radeon_gart_table_vram_free(rdev);
	} else {
		r100_pci_gart_disable(rdev);
		radeon_gart_table_ram_free(rdev);
	}
	radeon_gart_fini(rdev);
}


/*
 * Fence emission
 */
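/* A note on the ring writes below: PACKET0(reg, n) builds a CP type-0
 * packet header that writes n+1 consecutive dwords starting at register
 * reg (the header encodes reg >> 2 and a dword count). This description
 * is an inference from how PACKET0 is used in this file, not a spec. */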
void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today callers are ib schedule and buffer move) */
	/* Write SC register so SC & US assert idle */
	radeon_ring_write(rdev, PACKET0(0x43E0, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(0x43E4, 0));
	radeon_ring_write(rdev, 0);
	/* Flush 3D cache */
	radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
	radeon_ring_write(rdev, (2 << 0));
	radeon_ring_write(rdev, PACKET0(0x4F18, 0));
	radeon_ring_write(rdev, (1 << 0));
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
	radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}


/*
 * Global GPU functions
 */
int r300_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset,
		  uint64_t dst_offset,
		  unsigned num_pages,
		  struct radeon_fence *fence)
{
	uint32_t size;
	uint32_t cur_size;
	int i, num_loops;
	int r = 0;

	/* radeon pitch is /64 */
	size = num_pages << PAGE_SHIFT;
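	/* the DMA packet's byte-count field appears to be 21 bits wide
	 * (judging by the 0x1FFFFF limit used here), so large copies are
	 * split into chunks of at most 0x1FFFFF bytes */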
	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}
	/* Must wait for 2D idle & clean before DMA or hangs might happen */
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, (1 << 16));
	for (i = 0; i < num_loops; i++) {
		cur_size = size;
		if (cur_size > 0x1FFFFF) {
			cur_size = 0x1FFFFF;
		}
		size -= cur_size;
		radeon_ring_write(rdev, PACKET0(0x720, 2));
		radeon_ring_write(rdev, src_offset);
		radeon_ring_write(rdev, dst_offset);
		radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
		src_offset += cur_size;
		dst_offset += cur_size;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}

void r300_ring_start(struct radeon_device *rdev)
{
	unsigned gb_tile_config;
	int r;

	/* Sub-pixel 1/12 so we can have 4K rendering according to the docs */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}

	r = radeon_ring_lock(rdev, 64);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(rdev, gb_tile_config);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(0x170C, 0));
	radeon_ring_write(rdev, 1 << 31);
	radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
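	/* with the 1/12 sub-pixel precision selected above, a sample
	 * position of 6 lands on the pixel centre (6/12 = 0.5); this is
	 * an inference from the sub-pixel comment, not a documented fact */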
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(rdev,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(rdev,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev);
}

void r300_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_R300 &&
	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
	}
}

int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(0x0150);
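		/* bit 4 appears to be the MC idle flag; this is an
		 * assumption from how the bit is used here */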
		if (tmp & (1 << 4)) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void r300_gpu_init(struct radeon_device *rdev)
{
	uint32_t gb_tile_config, tmp;

	r100_hdp_reset(rdev);
	/* FIXME: does rv380 have one pipe? */
	if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
		/* r300,r350 */
		rdev->num_gb_pipes = 2;
	} else {
		/* rv350,rv370,rv380 */
		rdev->num_gb_pipes = 1;
	}
	rdev->num_z_pipes = 1;
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	default:
	case 1:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(0x170C);
	WREG32(0x170C, tmp | (1 << 31));

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
}

int r300_ga_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	for (i = 0; i < rdev->usec_timeout; i++) {
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
		(void)RREG32(RADEON_RBBM_SOFT_RESET);
		udelay(200);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		/* Wait to prevent race in RBBM_STATUS */
		mdelay(1);
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (tmp & ((1 << 20) | (1 << 26))) {
			DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
			/* GA still busy, soft reset it */
			WREG32(0x429C, 0x200);
			WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
			WREG32(0x43E0, 0);
			WREG32(0x43E4, 0);
			WREG32(0x24AC, 0);
		}
		/* Wait to prevent race in RBBM_STATUS */
		mdelay(1);
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			break;
		}
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
				 tmp);
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset GA! (RBBM_STATUS=0x%08X)\n", tmp);
	return -1;
}

int r300_gpu_reset(struct radeon_device *rdev)
{
	uint32_t status;

	/* the reset order likely matters */
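	/* RBBM_STATUS bit meanings, as inferred from the checks below
	 * (not from a register spec): bit 16 = CP busy, bits 17/18/27 =
	 * 2D block busy, bits 20/26 = VAP/GA busy, bit 31 = GUI active */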
	status = RREG32(RADEON_RBBM_STATUS);
	/* reset HDP */
	r100_hdp_reset(rdev);
	/* reset rb2d */
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
		r100_rb2d_reset(rdev);
	}
	/* reset GA */
	if (status & ((1 << 20) | (1 << 26))) {
		r300_ga_reset(rdev);
	}
	/* reset CP */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 16)) {
		r100_cp_reset(rdev);
	}
	/* Check if GPU is idle */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 31)) {
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
		return -1;
	}
	DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
	return 0;
}


/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_vram_info(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RADEON_MEM_CNTL);
	if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
		rdev->mc.vram_width = 128;
	} else {
		rdev->mc.vram_width = 64;
	}

	r100_vram_init_sizes(rdev);
}


/*
 * PCIE Lanes
 */

void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	uint32_t link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));

	/* wait for lane set to complete */
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
	return 0;
#endif
}


/*
 * CS functions
 */
static int r300_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp, tile_flags = 0;
	unsigned i;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	track = (struct r100_cs_track *)p->track;
	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_ZB_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		i = (reg - R300_TX_OFFSET_0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
		break;
	/* Tracked registers */
	case 0x2084:
		/* VAP_VF_CNTL */
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		break;
	case 0x20B4:
		/* VAP_VTX_SIZE */
		track->vtx_size = ib_chunk->kdata[idx] & 0x7F;
		break;
	case 0x2134:
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL;
		break;
	case 0x43E4:
		/* SC_SCISSOR1 */
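		/* scissor Y coordinates on pre-RV515 parts appear to carry
		 * a 1440 bias, which the family check below removes; this
		 * is an inference from the code, not from documentation */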
		track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
			track->maxy -= 1440;
		}
		break;
	case 0x4E00:
		/* RB3D_CCTL */
		track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1;
		break;
	case 0x4E38:
	case 0x4E3C:
	case 0x4E40:
	case 0x4E44:
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_COLOR_TILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_COLOR_MICROTILE_ENABLE;

		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
		switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
		case 9:
		case 11:
		case 12:
			track->cb[i].cpp = 1;
			break;
		case 3:
		case 4:
		case 13:
		case 15:
			track->cb[i].cpp = 2;
			break;
		case 6:
			track->cb[i].cpp = 4;
			break;
		case 10:
			track->cb[i].cpp = 8;
			break;
		case 7:
			track->cb[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d)!\n",
				  ((ib_chunk->kdata[idx] >> 21) & 0xF));
			return -EINVAL;
		}
		break;
	case 0x4F00:
		/* ZB_CNTL */
		if (ib_chunk->kdata[idx] & 2) {
			track->z_enabled = true;
		} else {
			track->z_enabled = false;
		}
		break;
	case 0x4F10:
		/* ZB_FORMAT */
		switch ((ib_chunk->kdata[idx] & 0xF)) {
		case 0:
		case 1:
			track->zb.cpp = 2;
			break;
		case 2:
			track->zb.cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid z buffer format (%d)!\n",
				  (ib_chunk->kdata[idx] & 0xF));
			return -EINVAL;
		}
		break;
	case 0x4F24:
		/* ZB_DEPTHPITCH */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_DEPTHMICROTILE_TILED;

		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
		break;
	case 0x4104:
		for (i = 0; i < 16; i++) {
			bool enabled;

			enabled = !!(ib_chunk->kdata[idx] & (1 << i));
			track->textures[i].enabled = enabled;
		}
		break;
	case 0x44C0:
	case 0x44C4:
	case 0x44C8:
	case 0x44CC:
	case 0x44D0:
	case 0x44D4:
	case 0x44D8:
	case 0x44DC:
	case 0x44E0:
	case 0x44E4:
	case 0x44E8:
	case 0x44EC:
	case 0x44F0:
	case 0x44F4:
	case 0x44F8:
	case 0x44FC:
		/* TX_FORMAT1_[0-15] */
		i = (reg - 0x44C0) >> 2;
		tmp = (ib_chunk->kdata[idx] >> 25) & 0x3;
		track->textures[i].tex_coord_type = tmp;
		switch ((ib_chunk->kdata[idx] & 0x1F)) {
		case R300_TX_FORMAT_X8:
		case R300_TX_FORMAT_Y4X4:
		case R300_TX_FORMAT_Z3Y3X2:
			track->textures[i].cpp = 1;
			break;
		case R300_TX_FORMAT_X16:
		case R300_TX_FORMAT_Y8X8:
		case R300_TX_FORMAT_Z5Y6X5:
		case R300_TX_FORMAT_Z6Y5X5:
		case R300_TX_FORMAT_W4Z4Y4X4:
		case R300_TX_FORMAT_W1Z5Y5X5:
		case R300_TX_FORMAT_DXT1:
		case R300_TX_FORMAT_D3DMFT_CxV8U8:
		case R300_TX_FORMAT_B8G8_B8G8:
		case R300_TX_FORMAT_G8R8_G8B8:
			track->textures[i].cpp = 2;
			break;
		case R300_TX_FORMAT_Y16X16:
		case R300_TX_FORMAT_Z11Y11X10:
		case R300_TX_FORMAT_Z10Y11X11:
		case R300_TX_FORMAT_W8Z8Y8X8:
		case R300_TX_FORMAT_W2Z10Y10X10:
		case 0x17:
		case R300_TX_FORMAT_FL_I32:
		case 0x1e:
		case R300_TX_FORMAT_DXT3:
		case R300_TX_FORMAT_DXT5:
			track->textures[i].cpp = 4;
			break;
		case R300_TX_FORMAT_W16Z16Y16X16:
		case R300_TX_FORMAT_FL_R16G16B16A16:
		case R300_TX_FORMAT_FL_I32A32:
			track->textures[i].cpp = 8;
			break;
		case R300_TX_FORMAT_FL_R32G32B32A32:
			track->textures[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid texture format %u\n",
				  (ib_chunk->kdata[idx] & 0x1F));
			return -EINVAL;
		}
		break;
	case 0x4400:
	case 0x4404:
	case 0x4408:
	case 0x440C:
	case 0x4410:
	case 0x4414:
	case 0x4418:
	case 0x441C:
	case 0x4420:
	case 0x4424:
	case 0x4428:
	case 0x442C:
	case 0x4430:
	case 0x4434:
	case 0x4438:
	case 0x443C:
		/* TX_FILTER0_[0-15] */
		i = (reg - 0x4400) >> 2;
		tmp = ib_chunk->kdata[idx] & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_w = false;
		}
		tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_h = false;
		}
		break;
	case 0x4500:
	case 0x4504:
	case 0x4508:
	case 0x450C:
	case 0x4510:
	case 0x4514:
	case 0x4518:
	case 0x451C:
	case 0x4520:
	case 0x4524:
	case 0x4528:
	case 0x452C:
	case 0x4530:
	case 0x4534:
	case 0x4538:
	case 0x453C:
		/* TX_FORMAT2_[0-15] */
		i = (reg - 0x4500) >> 2;
		tmp = ib_chunk->kdata[idx] & 0x3FFF;
		track->textures[i].pitch = tmp + 1;
		if (p->rdev->family >= CHIP_RV515) {
			tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11;
			track->textures[i].width_11 = tmp;
			tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11;
			track->textures[i].height_11 = tmp;
		}
		break;
	case 0x4480:
	case 0x4484:
	case 0x4488:
	case 0x448C:
	case 0x4490:
	case 0x4494:
	case 0x4498:
	case 0x449C:
	case 0x44A0:
	case 0x44A4:
	case 0x44A8:
	case 0x44AC:
	case 0x44B0:
	case 0x44B4:
	case 0x44B8:
	case 0x44BC:
		/* TX_FORMAT0_[0-15] */
		i = (reg - 0x4480) >> 2;
		tmp = ib_chunk->kdata[idx] & 0x7FF;
		track->textures[i].width = tmp + 1;
		tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF;
		track->textures[i].height = tmp + 1;
		tmp = (ib_chunk->kdata[idx] >> 26) & 0xF;
		track->textures[i].num_levels = tmp;
		tmp = ib_chunk->kdata[idx] & (1 << 31);
		track->textures[i].use_pitch = !!tmp;
		tmp = (ib_chunk->kdata[idx] >> 22) & 0xF;
		track->textures[i].txdepth = tmp;
		break;
	case R300_ZB_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case 0x4be8:
		/* valid register only on RV530 */
		if (p->rdev->family == CHIP_RV530)
			break;
		/* fall through - do not move */
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int r300_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	unsigned idx;
	unsigned i, c;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		c = ib_chunk->kdata[idx++] & 0x1F;
		track->num_arrays = c;
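		/* vertex arrays come in pairs: each 3-dword group packs the
		 * element sizes of two arrays into one dword followed by the
		 * two buffer addresses, with a trailing 2-dword group when
		 * the count is odd (an inference from the parsing below) */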
		for (i = 0; i < (c - 1); i += 2, idx += 3) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 0].robj = reloc->robj;
			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
			track->arrays[i + 0].esize &= 0x7F;
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 1].robj = reloc->robj;
			track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
			track->arrays[i + 1].esize &= 0x7F;
		}
		if (c & 1) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 0].robj = reloc->robj;
			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
			track->arrays[i + 0].esize &= 0x7F;
		}
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	/* Draw packets */
	case PACKET3_3D_DRAW_IMMD:
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the cmd stream */
		if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = ib_chunk->kdata[idx+1];
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_IMMD_2:
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the cmd stream */
		if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

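/* Top-level CS parser: a command stream is a sequence of packets, where
 * type-0 packets write registers (validated against the reg_safe bitmap
 * plus the checker above), type-2 packets are padding, and type-3 packets
 * carry commands such as draws. This summary is an inference from the
 * parsing loop below. */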
int r300_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track track;
	int r;

	r100_cs_track_clear(p->rdev, &track);
	p->track = &track;
	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r100_cs_parse_packet0(p, &pkt,
						  p->rdev->config.r300.reg_safe_bm,
						  p->rdev->config.r300.reg_safe_bm_size,
						  &r300_packet0_check);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r300_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}

int r300_init(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
	return 0;
}