2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/seq_file.h>
31 #include "radeon_reg.h"
34 /* r300,r350,rv350,rv370,rv380 depends on : */
35 void r100_hdp_reset(struct radeon_device *rdev);
36 int r100_cp_reset(struct radeon_device *rdev);
37 int r100_rb2d_reset(struct radeon_device *rdev);
38 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
39 int r100_pci_gart_enable(struct radeon_device *rdev);
40 void r100_pci_gart_disable(struct radeon_device *rdev);
41 void r100_mc_setup(struct radeon_device *rdev);
42 void r100_mc_disable_clients(struct radeon_device *rdev);
43 int r100_gui_wait_for_idle(struct radeon_device *rdev);
44 int r100_cs_packet_parse(struct radeon_cs_parser *p,
45 struct radeon_cs_packet *pkt,
47 int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
48 struct radeon_cs_reloc **cs_reloc);
49 int r100_cs_parse_packet0(struct radeon_cs_parser *p,
50 struct radeon_cs_packet *pkt,
51 const unsigned *auth, unsigned n,
52 radeon_packet0_check_t check);
53 void r100_cs_dump_packet(struct radeon_cs_parser *p,
54 struct radeon_cs_packet *pkt);
55 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
56 struct radeon_cs_packet *pkt,
57 struct radeon_object *robj);
/* This file gathers functions specific to:
 * r300,r350,rv350,rv370,rv380
 *
 * Some of these functions might be used by newer ASICs.
 */
/* Forward declarations for functions defined later in this file. */
void r300_gpu_init(struct radeon_device *rdev);
int r300_mc_wait_for_idle(struct radeon_device *rdev);
int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
/*
 * rv370,rv380 PCIE GART
 */
72 void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
77 /* Workaround HW bug do flush 2 times */
78 for (i = 0; i < 2; i++) {
79 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
80 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
81 (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
82 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
87 int rv370_pcie_gart_enable(struct radeon_device *rdev)
93 /* Initialize common gart structure */
94 r = radeon_gart_init(rdev);
98 r = rv370_debugfs_pcie_gart_info_init(rdev);
100 DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
102 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
103 r = radeon_gart_table_vram_alloc(rdev);
107 /* discard memory request outside of configured range */
108 tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
109 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
110 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
111 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
112 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
113 WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
114 WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
115 table_addr = rdev->gart.table_addr;
116 WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
117 /* FIXME: setup default page */
118 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
119 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
121 WREG32_PCIE(0x18, 0);
122 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
123 tmp |= RADEON_PCIE_TX_GART_EN;
124 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
125 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
126 rv370_pcie_gart_tlb_flush(rdev);
127 DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
128 rdev->mc.gtt_size >> 20, table_addr);
129 rdev->gart.ready = true;
133 void rv370_pcie_gart_disable(struct radeon_device *rdev)
137 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
138 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
139 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
140 if (rdev->gart.table.vram.robj) {
141 radeon_object_kunmap(rdev->gart.table.vram.robj);
142 radeon_object_unpin(rdev->gart.table.vram.robj);
146 int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
148 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
150 if (i < 0 || i > rdev->gart.num_gpu_pages) {
153 addr = (lower_32_bits(addr) >> 8) |
154 ((upper_32_bits(addr) & 0xff) << 24) |
156 writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4));
160 int r300_gart_enable(struct radeon_device *rdev)
163 if (rdev->flags & RADEON_IS_AGP) {
164 if (rdev->family > CHIP_RV350) {
165 rv370_pcie_gart_disable(rdev);
167 r100_pci_gart_disable(rdev);
172 if (rdev->flags & RADEON_IS_PCIE) {
173 rdev->asic->gart_disable = &rv370_pcie_gart_disable;
174 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
175 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
176 return rv370_pcie_gart_enable(rdev);
178 return r100_pci_gart_enable(rdev);
185 int r300_mc_init(struct radeon_device *rdev)
189 if (r100_debugfs_rbbm_init(rdev)) {
190 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
194 r100_pci_gart_disable(rdev);
195 if (rdev->flags & RADEON_IS_PCIE) {
196 rv370_pcie_gart_disable(rdev);
199 /* Setup GPU memory space */
200 rdev->mc.vram_location = 0xFFFFFFFFUL;
201 rdev->mc.gtt_location = 0xFFFFFFFFUL;
202 if (rdev->flags & RADEON_IS_AGP) {
203 r = radeon_agp_init(rdev);
205 printk(KERN_WARNING "[drm] Disabling AGP\n");
206 rdev->flags &= ~RADEON_IS_AGP;
207 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
209 rdev->mc.gtt_location = rdev->mc.agp_base;
212 r = radeon_mc_setup(rdev);
217 /* Program GPU memory space */
218 r100_mc_disable_clients(rdev);
219 if (r300_mc_wait_for_idle(rdev)) {
220 printk(KERN_WARNING "Failed to wait MC idle while "
221 "programming pipes. Bad things might happen.\n");
227 void r300_mc_fini(struct radeon_device *rdev)
229 if (rdev->flags & RADEON_IS_PCIE) {
230 rv370_pcie_gart_disable(rdev);
231 radeon_gart_table_vram_free(rdev);
233 r100_pci_gart_disable(rdev);
234 radeon_gart_table_ram_free(rdev);
236 radeon_gart_fini(rdev);
243 void r300_fence_ring_emit(struct radeon_device *rdev,
244 struct radeon_fence *fence)
246 /* Who ever call radeon_fence_emit should call ring_lock and ask
247 * for enough space (today caller are ib schedule and buffer move) */
248 /* Write SC register so SC & US assert idle */
249 radeon_ring_write(rdev, PACKET0(0x43E0, 0));
250 radeon_ring_write(rdev, 0);
251 radeon_ring_write(rdev, PACKET0(0x43E4, 0));
252 radeon_ring_write(rdev, 0);
254 radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
255 radeon_ring_write(rdev, (2 << 0));
256 radeon_ring_write(rdev, PACKET0(0x4F18, 0));
257 radeon_ring_write(rdev, (1 << 0));
258 /* Wait until IDLE & CLEAN */
259 radeon_ring_write(rdev, PACKET0(0x1720, 0));
260 radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
261 /* Emit fence sequence & fire IRQ */
262 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
263 radeon_ring_write(rdev, fence->seq);
264 radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
265 radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
/*
 * Global GPU functions
 */
272 int r300_copy_dma(struct radeon_device *rdev,
276 struct radeon_fence *fence)
283 /* radeon pitch is /64 */
284 size = num_pages << PAGE_SHIFT;
285 num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
286 r = radeon_ring_lock(rdev, num_loops * 4 + 64);
288 DRM_ERROR("radeon: moving bo (%d).\n", r);
291 /* Must wait for 2D idle & clean before DMA or hangs might happen */
292 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 ));
293 radeon_ring_write(rdev, (1 << 16));
294 for (i = 0; i < num_loops; i++) {
296 if (cur_size > 0x1FFFFF) {
300 radeon_ring_write(rdev, PACKET0(0x720, 2));
301 radeon_ring_write(rdev, src_offset);
302 radeon_ring_write(rdev, dst_offset);
303 radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
304 src_offset += cur_size;
305 dst_offset += cur_size;
307 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
308 radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
310 r = radeon_fence_emit(rdev, fence);
312 radeon_ring_unlock_commit(rdev);
316 void r300_ring_start(struct radeon_device *rdev)
318 unsigned gb_tile_config;
321 /* Sub pixel 1/12 so we can have 4K rendering according to doc */
322 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
323 switch(rdev->num_gb_pipes) {
325 gb_tile_config |= R300_PIPE_COUNT_R300;
328 gb_tile_config |= R300_PIPE_COUNT_R420_3P;
331 gb_tile_config |= R300_PIPE_COUNT_R420;
335 gb_tile_config |= R300_PIPE_COUNT_RV350;
339 r = radeon_ring_lock(rdev, 64);
343 radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
344 radeon_ring_write(rdev,
345 RADEON_ISYNC_ANY2D_IDLE3D |
346 RADEON_ISYNC_ANY3D_IDLE2D |
347 RADEON_ISYNC_WAIT_IDLEGUI |
348 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
349 radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
350 radeon_ring_write(rdev, gb_tile_config);
351 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
352 radeon_ring_write(rdev,
353 RADEON_WAIT_2D_IDLECLEAN |
354 RADEON_WAIT_3D_IDLECLEAN);
355 radeon_ring_write(rdev, PACKET0(0x170C, 0));
356 radeon_ring_write(rdev, 1 << 31);
357 radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
358 radeon_ring_write(rdev, 0);
359 radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
360 radeon_ring_write(rdev, 0);
361 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
362 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
363 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
364 radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
365 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
366 radeon_ring_write(rdev,
367 RADEON_WAIT_2D_IDLECLEAN |
368 RADEON_WAIT_3D_IDLECLEAN);
369 radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
370 radeon_ring_write(rdev, 0);
371 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
372 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
373 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
374 radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
375 radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
376 radeon_ring_write(rdev,
377 ((6 << R300_MS_X0_SHIFT) |
378 (6 << R300_MS_Y0_SHIFT) |
379 (6 << R300_MS_X1_SHIFT) |
380 (6 << R300_MS_Y1_SHIFT) |
381 (6 << R300_MS_X2_SHIFT) |
382 (6 << R300_MS_Y2_SHIFT) |
383 (6 << R300_MSBD0_Y_SHIFT) |
384 (6 << R300_MSBD0_X_SHIFT)));
385 radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
386 radeon_ring_write(rdev,
387 ((6 << R300_MS_X3_SHIFT) |
388 (6 << R300_MS_Y3_SHIFT) |
389 (6 << R300_MS_X4_SHIFT) |
390 (6 << R300_MS_Y4_SHIFT) |
391 (6 << R300_MS_X5_SHIFT) |
392 (6 << R300_MS_Y5_SHIFT) |
393 (6 << R300_MSBD1_SHIFT)));
394 radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
395 radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
396 radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
397 radeon_ring_write(rdev,
398 R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
399 radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
400 radeon_ring_write(rdev,
401 R300_GEOMETRY_ROUND_NEAREST |
402 R300_COLOR_ROUND_NEAREST);
403 radeon_ring_unlock_commit(rdev);
406 void r300_errata(struct radeon_device *rdev)
408 rdev->pll_errata = 0;
410 if (rdev->family == CHIP_R300 &&
411 (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
412 rdev->pll_errata |= CHIP_ERRATA_R300_CG;
416 int r300_mc_wait_for_idle(struct radeon_device *rdev)
421 for (i = 0; i < rdev->usec_timeout; i++) {
423 tmp = RREG32(0x0150);
424 if (tmp & (1 << 4)) {
432 void r300_gpu_init(struct radeon_device *rdev)
434 uint32_t gb_tile_config, tmp;
436 r100_hdp_reset(rdev);
437 /* FIXME: rv380 one pipes ? */
438 if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
440 rdev->num_gb_pipes = 2;
442 /* rv350,rv370,rv380 */
443 rdev->num_gb_pipes = 1;
445 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
446 switch (rdev->num_gb_pipes) {
448 gb_tile_config |= R300_PIPE_COUNT_R300;
451 gb_tile_config |= R300_PIPE_COUNT_R420_3P;
454 gb_tile_config |= R300_PIPE_COUNT_R420;
458 gb_tile_config |= R300_PIPE_COUNT_RV350;
461 WREG32(R300_GB_TILE_CONFIG, gb_tile_config);
463 if (r100_gui_wait_for_idle(rdev)) {
464 printk(KERN_WARNING "Failed to wait GUI idle while "
465 "programming pipes. Bad things might happen.\n");
468 tmp = RREG32(0x170C);
469 WREG32(0x170C, tmp | (1 << 31));
471 WREG32(R300_RB2D_DSTCACHE_MODE,
472 R300_DC_AUTOFLUSH_ENABLE |
473 R300_DC_DC_DISABLE_IGNORE_PE);
475 if (r100_gui_wait_for_idle(rdev)) {
476 printk(KERN_WARNING "Failed to wait GUI idle while "
477 "programming pipes. Bad things might happen.\n");
479 if (r300_mc_wait_for_idle(rdev)) {
480 printk(KERN_WARNING "Failed to wait MC idle while "
481 "programming pipes. Bad things might happen.\n");
483 DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
486 int r300_ga_reset(struct radeon_device *rdev)
492 reinit_cp = rdev->cp.ready;
493 rdev->cp.ready = false;
494 for (i = 0; i < rdev->usec_timeout; i++) {
495 WREG32(RADEON_CP_CSQ_MODE, 0);
496 WREG32(RADEON_CP_CSQ_CNTL, 0);
497 WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
498 (void)RREG32(RADEON_RBBM_SOFT_RESET);
500 WREG32(RADEON_RBBM_SOFT_RESET, 0);
501 /* Wait to prevent race in RBBM_STATUS */
503 tmp = RREG32(RADEON_RBBM_STATUS);
504 if (tmp & ((1 << 20) | (1 << 26))) {
505 DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
506 /* GA still busy soft reset it */
507 WREG32(0x429C, 0x200);
508 WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
513 /* Wait to prevent race in RBBM_STATUS */
515 tmp = RREG32(RADEON_RBBM_STATUS);
516 if (!(tmp & ((1 << 20) | (1 << 26)))) {
520 for (i = 0; i < rdev->usec_timeout; i++) {
521 tmp = RREG32(RADEON_RBBM_STATUS);
522 if (!(tmp & ((1 << 20) | (1 << 26)))) {
523 DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
526 return r100_cp_init(rdev, rdev->cp.ring_size);
532 tmp = RREG32(RADEON_RBBM_STATUS);
533 DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
537 int r300_gpu_reset(struct radeon_device *rdev)
541 /* reset order likely matter */
542 status = RREG32(RADEON_RBBM_STATUS);
544 r100_hdp_reset(rdev);
546 if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
547 r100_rb2d_reset(rdev);
550 if (status & ((1 << 20) | (1 << 26))) {
554 status = RREG32(RADEON_RBBM_STATUS);
555 if (status & (1 << 16)) {
558 /* Check if GPU is idle */
559 status = RREG32(RADEON_RBBM_STATUS);
560 if (status & (1 << 31)) {
561 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
564 DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
/*
 * r300,r350,rv350,rv380 VRAM info
 */
572 void r300_vram_info(struct radeon_device *rdev)
576 /* DDR for all card after R300 & IGP */
577 rdev->mc.vram_is_ddr = true;
578 tmp = RREG32(RADEON_MEM_CNTL);
579 if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
580 rdev->mc.vram_width = 128;
582 rdev->mc.vram_width = 64;
584 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
586 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
587 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
/*
 * Indirect registers accessor
 */
594 uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
598 WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
599 (void)RREG32(RADEON_PCIE_INDEX);
600 r = RREG32(RADEON_PCIE_DATA);
604 void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
606 WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
607 (void)RREG32(RADEON_PCIE_INDEX);
608 WREG32(RADEON_PCIE_DATA, (v));
609 (void)RREG32(RADEON_PCIE_DATA);
616 void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
618 uint32_t link_width_cntl, mask;
620 if (rdev->flags & RADEON_IS_IGP)
623 if (!(rdev->flags & RADEON_IS_PCIE))
626 /* FIXME wait for idle */
630 mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
633 mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
636 mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
639 mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
642 mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
645 mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
649 mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
653 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
655 if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
656 (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
659 link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
660 RADEON_PCIE_LC_RECONFIG_NOW |
661 RADEON_PCIE_LC_RECONFIG_LATER |
662 RADEON_PCIE_LC_SHORT_RECONFIG_EN);
663 link_width_cntl |= mask;
664 WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
665 WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
666 RADEON_PCIE_LC_RECONFIG_NOW));
668 /* wait for lane set to complete */
669 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
670 while (link_width_cntl == 0xffffffff)
671 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
/*
 * Debugfs info: dump the PCIE GART configuration registers.
 */
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

/* Register the PCIE GART debugfs file; succeeds trivially when debugfs
 * support is compiled out. */
int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
	return 0;
#endif
}
722 struct r300_cs_track_cb {
723 struct radeon_object *robj;
729 struct r300_cs_track_array {
730 struct radeon_object *robj;
734 struct r300_cs_track_texture {
735 struct radeon_object *robj;
741 unsigned tex_coord_type;
751 struct r300_cs_track {
755 unsigned vap_vf_cntl;
756 unsigned immd_dwords;
759 struct r300_cs_track_array arrays[11];
760 struct r300_cs_track_cb cb[4];
761 struct r300_cs_track_cb zb;
762 struct r300_cs_track_texture textures[16];
766 static inline void r300_cs_track_texture_print(struct r300_cs_track_texture *t)
768 DRM_ERROR("pitch %d\n", t->pitch);
769 DRM_ERROR("width %d\n", t->width);
770 DRM_ERROR("height %d\n", t->height);
771 DRM_ERROR("num levels %d\n", t->num_levels);
772 DRM_ERROR("depth %d\n", t->txdepth);
773 DRM_ERROR("bpp %d\n", t->cpp);
774 DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
775 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
776 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
779 static inline int r300_cs_track_texture_check(struct radeon_device *rdev,
780 struct r300_cs_track *track)
782 struct radeon_object *robj;
786 for (u = 0; u < 16; u++) {
787 if (!track->textures[u].enabled)
789 robj = track->textures[u].robj;
791 DRM_ERROR("No texture bound to unit %u\n", u);
795 for (i = 0; i <= track->textures[u].num_levels; i++) {
796 if (track->textures[u].use_pitch) {
797 w = track->textures[u].pitch / (1 << i);
799 w = track->textures[u].width / (1 << i);
800 if (rdev->family >= CHIP_RV515)
801 w |= track->textures[u].width_11;
802 if (track->textures[u].roundup_w)
803 w = roundup_pow_of_two(w);
805 h = track->textures[u].height / (1 << i);
806 if (rdev->family >= CHIP_RV515)
807 h |= track->textures[u].height_11;
808 if (track->textures[u].roundup_h)
809 h = roundup_pow_of_two(h);
812 size *= track->textures[u].cpp;
813 switch (track->textures[u].tex_coord_type) {
817 size *= (1 << track->textures[u].txdepth);
823 DRM_ERROR("Invalid texture coordinate type %u for unit "
824 "%u\n", track->textures[u].tex_coord_type, u);
827 if (size > radeon_object_size(robj)) {
828 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
829 "%lu\n", u, size, radeon_object_size(robj));
830 r300_cs_track_texture_print(&track->textures[u]);
837 int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track)
844 for (i = 0; i < track->num_cb; i++) {
845 if (track->cb[i].robj == NULL) {
846 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
849 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
850 size += track->cb[i].offset;
851 if (size > radeon_object_size(track->cb[i].robj)) {
852 DRM_ERROR("[drm] Buffer too small for color buffer %d "
853 "(need %lu have %lu) !\n", i, size,
854 radeon_object_size(track->cb[i].robj));
855 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
856 i, track->cb[i].pitch, track->cb[i].cpp,
857 track->cb[i].offset, track->maxy);
861 if (track->z_enabled) {
862 if (track->zb.robj == NULL) {
863 DRM_ERROR("[drm] No buffer for z buffer !\n");
866 size = track->zb.pitch * track->zb.cpp * track->maxy;
867 size += track->zb.offset;
868 if (size > radeon_object_size(track->zb.robj)) {
869 DRM_ERROR("[drm] Buffer too small for z buffer "
870 "(need %lu have %lu) !\n", size,
871 radeon_object_size(track->zb.robj));
875 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
876 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
879 for (i = 0; i < track->num_arrays; i++) {
880 size = track->arrays[i].esize * track->max_indx * 4;
881 if (track->arrays[i].robj == NULL) {
882 DRM_ERROR("(PW %u) Vertex array %u no buffer "
883 "bound\n", prim_walk, i);
886 if (size > radeon_object_size(track->arrays[i].robj)) {
887 DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
888 "have %lu dwords\n", prim_walk, i,
890 radeon_object_size(track->arrays[i].robj) >> 2);
891 DRM_ERROR("Max indices %u\n", track->max_indx);
897 for (i = 0; i < track->num_arrays; i++) {
898 size = track->arrays[i].esize * (nverts - 1) * 4;
899 if (track->arrays[i].robj == NULL) {
900 DRM_ERROR("(PW %u) Vertex array %u no buffer "
901 "bound\n", prim_walk, i);
904 if (size > radeon_object_size(track->arrays[i].robj)) {
905 DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
906 "have %lu dwords\n", prim_walk, i, size >> 2,
907 radeon_object_size(track->arrays[i].robj) >> 2);
913 size = track->vtx_size * nverts;
914 if (size != track->immd_dwords) {
915 DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
916 track->immd_dwords, size);
917 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
918 nverts, track->vtx_size);
923 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
927 return r300_cs_track_texture_check(rdev, track);
930 static inline void r300_cs_track_clear(struct r300_cs_track *track)
936 for (i = 0; i < track->num_cb; i++) {
937 track->cb[i].robj = NULL;
938 track->cb[i].pitch = 8192;
939 track->cb[i].cpp = 16;
940 track->cb[i].offset = 0;
942 track->z_enabled = true;
943 track->zb.robj = NULL;
944 track->zb.pitch = 8192;
946 track->zb.offset = 0;
947 track->vtx_size = 0x7F;
948 track->immd_dwords = 0xFFFFFFFFUL;
949 track->num_arrays = 11;
950 track->max_indx = 0x00FFFFFFUL;
951 for (i = 0; i < track->num_arrays; i++) {
952 track->arrays[i].robj = NULL;
953 track->arrays[i].esize = 0x7F;
955 for (i = 0; i < 16; i++) {
956 track->textures[i].pitch = 16536;
957 track->textures[i].width = 16536;
958 track->textures[i].height = 16536;
959 track->textures[i].width_11 = 1 << 11;
960 track->textures[i].height_11 = 1 << 11;
961 track->textures[i].num_levels = 12;
962 track->textures[i].txdepth = 16;
963 track->textures[i].cpp = 64;
964 track->textures[i].tex_coord_type = 1;
965 track->textures[i].robj = NULL;
966 /* CS IB emission code makes sure texture unit are disabled */
967 track->textures[i].enabled = false;
968 track->textures[i].roundup_w = true;
969 track->textures[i].roundup_h = true;
/* Bitmap of registers userspace may write directly from a CS: one bit per
 * register dword; a clear bit means the register needs special checking
 * (or is forbidden) in r300_packet0_check().  Defect fixed: the closing
 * brace of the initializer was missing. */
static const unsigned r300_reg_safe_bm[159] = {
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
	0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
	0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
	0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
	0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
	0x00000000, 0x0000C100, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0003FC01, 0xFFFFFFF8, 0xFE800B19,
};
/* Validate one register write from a type-0 packet: patch buffer-object
 * offsets through the relocation table and record state consumed later by
 * r300_cs_track_check().
 *
 * NOTE(review): this view of the file is missing many lines (the
 * enclosing switch(reg) statement, numerous case labels, break
 * statements and closing braces).  The code below is preserved exactly
 * as found rather than restructured — confirm against the full file. */
static int r300_packet0_check(struct radeon_cs_parser *p,
		struct radeon_cs_packet *pkt,
		unsigned idx, unsigned reg)
{
	struct radeon_cs_chunk *ib_chunk;	/* userspace IB chunk */
	struct radeon_cs_reloc *reloc;		/* current relocation entry */
	struct r300_cs_track *track;		/* per-CS validation state */
	volatile uint32_t *ib;			/* kernel IB being patched */
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	track = (struct r300_cs_track*)p->track;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		/* offset is stored in dwords >> 10; preserve pitch bits */
		tmp = ib_chunk->kdata[idx] & 0x003fffff;
		tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
		ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
	case R300_ZB_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		track->zb.robj = reloc->robj;
		track->zb.offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		i = (reg - R300_TX_OFFSET_0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
	/* Tracked registers */
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		track->vtx_size = ib_chunk->kdata[idx] & 0x7F;
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL;
		track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
			/* pre-RV515 scissor coordinates carry a 1440 bias */
			track->maxy -= 1440;
		track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1;
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
		switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
			track->cb[i].cpp = 1;
			track->cb[i].cpp = 2;
			track->cb[i].cpp = 4;
			track->cb[i].cpp = 8;
			track->cb[i].cpp = 16;
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((ib_chunk->kdata[idx] >> 21) & 0xF));
		if (ib_chunk->kdata[idx] & 2) {
			track->z_enabled = true;
			track->z_enabled = false;
		switch ((ib_chunk->kdata[idx] & 0xF)) {
			DRM_ERROR("Invalid z buffer format (%d) !\n",
				  (ib_chunk->kdata[idx] & 0xF));
		track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
		for (i = 0; i < 16; i++) {
			enabled = !!(ib_chunk->kdata[idx] & (1 << i));
			track->textures[i].enabled = enabled;
		/* TX_FORMAT1_[0-15] */
		i = (reg - 0x44C0) >> 2;
		tmp = (ib_chunk->kdata[idx] >> 25) & 0x3;
		track->textures[i].tex_coord_type = tmp;
		switch ((ib_chunk->kdata[idx] & 0x1F)) {
			track->textures[i].cpp = 1;
			track->textures[i].cpp = 2;
			track->textures[i].cpp = 4;
			track->textures[i].cpp = 8;
			track->textures[i].cpp = 16;
			DRM_ERROR("Invalid texture format %u\n",
				  (ib_chunk->kdata[idx] & 0x1F));
		/* TX_FILTER0_[0-15] */
		i = (reg - 0x4400) >> 2;
		tmp = ib_chunk->kdata[idx] & 0x7;;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_w = false;
		tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_h = false;
		/* TX_FORMAT2_[0-15] */
		i = (reg - 0x4500) >> 2;
		tmp = ib_chunk->kdata[idx] & 0x3FFF;
		track->textures[i].pitch = tmp + 1;
		if (p->rdev->family >= CHIP_RV515) {
			tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11;
			track->textures[i].width_11 = tmp;
			tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11;
			track->textures[i].height_11 = tmp;
		/* TX_FORMAT0_[0-15] */
		i = (reg - 0x4480) >> 2;
		tmp = ib_chunk->kdata[idx] & 0x7FF;
		track->textures[i].width = tmp + 1;
		tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF;
		track->textures[i].height = tmp + 1;
		tmp = (ib_chunk->kdata[idx] >> 26) & 0xF;
		track->textures[i].num_levels = tmp;
		tmp = ib_chunk->kdata[idx] & (1 << 31);
		track->textures[i].use_pitch = !!tmp;
		tmp = (ib_chunk->kdata[idx] >> 22) & 0xF;
		track->textures[i].txdepth = tmp;
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
/*
 * r300_packet3_check - validate one type-3 (PACKET3) command and patch
 * relocations into the indirect buffer.
 *
 * For vertex/index buffer packets the GPU addresses in the IB are rewritten
 * with the relocated buffer object offsets; for draw packets the
 * VAP_VF_CNTL state is captured into the CS track state and
 * r300_cs_track_check() verifies the draw against the tracked buffers.
 *
 * NOTE(review): this excerpt is elided — local declarations of r, i, c, idx
 * and several error-return paths are not visible here; comments below only
 * describe what the visible lines establish.
 */
1354 static int r300_packet3_check(struct radeon_cs_parser *p,
1355 struct radeon_cs_packet *pkt)
1357 struct radeon_cs_chunk *ib_chunk;
1358 struct radeon_cs_reloc *reloc;
1359 struct r300_cs_track *track;
/* volatile: the IB may be observed by the GPU; avoid cached re-reads */
1360 volatile uint32_t *ib;
1366 ib_chunk = &p->chunks[p->chunk_ib_idx];
1368 track = (struct r300_cs_track*)p->track;
1369 switch(pkt->opcode) {
1370 case PACKET3_3D_LOAD_VBPNTR:
/* low 5 bits of the first dword = number of vertex arrays */
1371 c = ib_chunk->kdata[idx++] & 0x1F;
1372 track->num_arrays = c;
/* arrays are packed two per 3 dwords: sizes dword + two pointers */
1373 for (i = 0; i < (c - 1); i+=2, idx+=3) {
1374 r = r100_cs_packet_next_reloc(p, &reloc);
1376 DRM_ERROR("No reloc for packet3 %d\n",
1378 r100_cs_dump_packet(p, pkt);
/* patch array pointer with relocated GPU offset of the buffer */
1381 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1382 track->arrays[i + 0].robj = reloc->robj;
/* element size for the even array lives in bits 8.. of the sizes dword */
1383 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1384 track->arrays[i + 0].esize &= 0x7F;
1385 r = r100_cs_packet_next_reloc(p, &reloc);
1387 DRM_ERROR("No reloc for packet3 %d\n",
1389 r100_cs_dump_packet(p, pkt);
1392 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
1393 track->arrays[i + 1].robj = reloc->robj;
/* element size for the odd array lives in bits 24.. of the sizes dword */
1394 track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
1395 track->arrays[i + 1].esize &= 0x7F;
/* odd array count: one trailing array remains after the pair loop */
1398 r = r100_cs_packet_next_reloc(p, &reloc);
1400 DRM_ERROR("No reloc for packet3 %d\n",
1402 r100_cs_dump_packet(p, pkt);
1405 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1406 track->arrays[i + 0].robj = reloc->robj;
1407 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1408 track->arrays[i + 0].esize &= 0x7F;
1411 case PACKET3_INDX_BUFFER:
1412 r = r100_cs_packet_next_reloc(p, &reloc);
1414 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1415 r100_cs_dump_packet(p, pkt);
/* patch index buffer address, then bounds-check it against the draw */
1418 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1419 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1425 case PACKET3_3D_DRAW_IMMD:
1426 /* Number of dwords is vtx_size * (num_vertices - 1)
1427 * PRIM_WALK must be equal to 3 vertex data in embedded
/* IMMD: VAP_VF_CNTL is in dword idx+1 (dword idx is the register header) */
1429 if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
1430 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1433 track->vap_vf_cntl = ib_chunk->kdata[idx+1];
/* one dword of the packet is the register header, not vertex data */
1434 track->immd_dwords = pkt->count - 1;
1435 r = r300_cs_track_check(p->rdev, track);
1440 case PACKET3_3D_DRAW_IMMD_2:
1441 /* Number of dwords is vtx_size * (num_vertices - 1)
1442 * PRIM_WALK must be equal to 3 vertex data in embedded
/* IMMD_2 variant: VAP_VF_CNTL is the first payload dword */
1444 if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
1445 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1448 track->vap_vf_cntl = ib_chunk->kdata[idx];
1449 track->immd_dwords = pkt->count;
1450 r = r300_cs_track_check(p->rdev, track);
1455 case PACKET3_3D_DRAW_VBUF:
1456 track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
1457 r = r300_cs_track_check(p->rdev, track);
1462 case PACKET3_3D_DRAW_VBUF_2:
1463 track->vap_vf_cntl = ib_chunk->kdata[idx];
1464 r = r300_cs_track_check(p->rdev, track);
1469 case PACKET3_3D_DRAW_INDX:
1470 track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
1471 r = r300_cs_track_check(p->rdev, track);
1476 case PACKET3_3D_DRAW_INDX_2:
1477 track->vap_vf_cntl = ib_chunk->kdata[idx];
1478 r = r300_cs_track_check(p->rdev, track);
/* anything not whitelisted above is rejected */
1486 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
/*
 * r300_cs_parse - top-level command stream parser for r300-family chips.
 *
 * Walks the IB chunk packet by packet, dispatching type-0 register writes
 * through the r300 safe-register bitmap (with r300_packet0_check for the
 * exceptions) and type-3 packets through r300_packet3_check, until the
 * whole chunk has been consumed.
 *
 * NOTE(review): elided excerpt — the do { } opening, error returns, and the
 * final return are not visible here.
 */
1492 int r300_cs_parse(struct radeon_cs_parser *p)
1494 struct radeon_cs_packet pkt;
/* track lives on the stack; one fresh tracking state per CS */
1495 struct r300_cs_track track;
1498 r300_cs_track_clear(&track);
1501 r = r100_cs_packet_parse(p, &pkt, p->idx);
/* advance past this packet: count is payload dwords minus one, +2 covers
 * the header dword and the count bias */
1505 p->idx += pkt.count + 2;
1508 r = r100_cs_parse_packet0(p, &pkt,
1509 p->rdev->config.r300.reg_safe_bm,
1510 p->rdev->config.r300.reg_safe_bm_size,
1511 &r300_packet0_check);
1516 r = r300_packet3_check(p, &pkt);
1519 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
/* loop until every dword of the IB chunk has been parsed */
1525 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1529 int r300_init(struct radeon_device *rdev)
1531 rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
1532 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);