/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_drm.h"
#include "radeon_share.h"
/* r300,r350,rv350,rv370,rv380 depend on: */
void r100_hdp_reset(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
int r100_rb2d_reset(struct radeon_device *rdev);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
void r100_mc_setup(struct radeon_device *rdev);
void r100_mc_disable_clients(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx);
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc);
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check);
void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt);
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					  struct radeon_cs_packet *pkt,
					  struct radeon_object *robj);
/* This file gathers functions specific to:
 * r300,r350,rv350,rv370,rv380
 *
 * Some of these functions might be used by newer ASICs.
 */
void r300_gpu_init(struct radeon_device *rdev);
int r300_mc_wait_for_idle(struct radeon_device *rdev);
int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);


/*
 * rv370,rv380 PCIE GART
 */
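/* The invalidate below is issued twice to work around a HW bug; the dummy
 * read back of PCIE_TX_GART_CNTL after each write presumably acts as a
 * posting read, making sure the INVALIDATE_TLB write has reached the PCIE
 * block before the saved control value is written back. */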
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Workaround HW bug: do the flush 2 times */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	}
}

int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	r = rv370_debugfs_pcie_gart_info_init(rdev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
	}
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	r = radeon_gart_table_vram_alloc(rdev);
	if (r) {
		return r;
	}
	/* discard memory request outside of configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
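	/* Example of the aperture programmed above (hypothetical values): with
	 * mc.gtt_location = 0xE0000000 and mc.gtt_size = 32MB, START_LO is
	 * 0xE0000000 and END_LO is 0xE1FFF000, i.e. the first byte of the last
	 * 4KB page of the GTT range; accesses outside this window are dropped
	 * because of the UNMAPPED_ACCESS_DISCARD setting. */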
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	WREG32_PCIE(0x18, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
		 rdev->mc.gtt_size >> 20, table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	if (rdev->gart.table.vram.robj) {
		radeon_object_kunmap(rdev->gart.table.vram.robj);
		radeon_object_unpin(rdev->gart.table.vram.robj);
	}
}

int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	addr = (lower_32_bits(addr) >> 8) |
	       ((upper_32_bits(addr) & 0xff) << 24) |
	       0xc;
	/* on x86 we want this to be CPU endian, on powerpc without HW
	 * swappers it'll get swapped on the way into VRAM - so no need
	 * for cpu_to_le32 on VRAM tables */
	writel(addr, ((void __iomem *)ptr) + (i * 4));
	return 0;
}

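/* Worked example of the entry packing done in rv370_pcie_gart_set_page()
 * above (hypothetical address): for addr = 0x12345000, lower_32_bits() >> 8
 * gives 0x00123450 and upper_32_bits() is 0, so the 32-bit entry written is
 * 0x00123450 OR'ed with the low flag bits; the page address is stored
 * shifted right by 8 with address bits 39:32 placed in entry bits 31:24. */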
int r300_gart_enable(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->family > CHIP_RV350) {
			rv370_pcie_gart_disable(rdev);
		} else {
			r100_pci_gart_disable(rdev);
		}
	}
	if (rdev->flags & RADEON_IS_PCIE) {
		/* PCIE parts use their own GART backend */
		rdev->asic->gart_disable = &rv370_pcie_gart_disable;
		rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
		rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
		return rv370_pcie_gart_enable(rdev);
	}
	return r100_pci_gart_enable(rdev);
}

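/* r300_mc_init() below sizes and places the two GPU-visible memory ranges
 * (VRAM and GTT). The 0xFFFFFFFFUL values appear to act as "pick a location
 * for me" markers for radeon_mc_setup(); when AGP is usable the GTT is
 * instead placed at the AGP aperture base reported by the bridge. */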
int r300_mc_init(struct radeon_device *rdev)
{
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}

	r100_pci_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE) {
		rv370_pcie_gart_disable(rdev);
	}

	/* Setup GPU memory space */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	r100_mc_disable_clients(rdev);
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	return 0;
}

void r300_mc_fini(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_PCIE) {
		rv370_pcie_gart_disable(rdev);
		radeon_gart_table_vram_free(rdev);
	} else {
		r100_pci_gart_disable(rdev);
		radeon_gart_table_ram_free(rdev);
	}
	radeon_gart_fini(rdev);
}

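/* Fence emission: the sequence below flushes and idles the 3D engine, then
 * writes the fence sequence number into the driver's scratch register and
 * fires a software interrupt so the CPU can notice completion. The raw
 * register offsets (0x43E0/0x43E4/0x4E4C/0x4F18/0x1720) are not symbolized
 * in this file. */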
void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today callers are ib schedule and buffer move) */
	/* Write SC register so SC & US assert idle */
	radeon_ring_write(rdev, PACKET0(0x43E0, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(0x43E4, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
	radeon_ring_write(rdev, (2 << 0));
	radeon_ring_write(rdev, PACKET0(0x4F18, 0));
	radeon_ring_write(rdev, (1 << 0));
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
	radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}

/*
 * Global GPU functions
 */
int r300_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset,
		  uint64_t dst_offset,
		  unsigned num_pages,
		  struct radeon_fence *fence)
{
	uint32_t size;
	uint32_t cur_size;
	int i, num_loops;
	int r = 0;

	/* radeon pitch is /64 */
	size = num_pages << PAGE_SHIFT;
	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
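	/* Example (hypothetical numbers): copying 1024 pages with 4KB pages
	 * gives size = 4MB = 0x400000 bytes; each DMA packet below moves at
	 * most 0x1FFFFF bytes, so num_loops = DIV_ROUND_UP(0x400000, 0x1FFFFF)
	 * = 3 and the ring is locked for 3 * 4 + 64 dwords. */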
	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}
	/* Must wait for 2D idle & clean before DMA or hangs might happen */
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, (1 << 16));
	for (i = 0; i < num_loops; i++) {
		cur_size = size;
		if (cur_size > 0x1FFFFF) {
			cur_size = 0x1FFFFF;
		}
		size -= cur_size;
		radeon_ring_write(rdev, PACKET0(0x720, 2));
		radeon_ring_write(rdev, src_offset);
		radeon_ring_write(rdev, dst_offset);
		radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
		src_offset += cur_size;
		dst_offset += cur_size;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}

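/* r300_ring_start() below primes the CP ring with the static 3D state the
 * rest of the driver relies on: sync behaviour (ISYNC_CNTL), the pipe/tile
 * configuration in GB_TILE_CONFIG, flushes of the destination and Z caches,
 * multisample positions, and geometry/color rounding modes. */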
void r300_ring_start(struct radeon_device *rdev)
{
	unsigned gb_tile_config;
	int r;

	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	r = radeon_ring_lock(rdev, 64);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(rdev, gb_tile_config);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(0x170C, 0));
	radeon_ring_write(rdev, 1 << 31);
	radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(rdev,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(rdev,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev);
}

void r300_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_R300 &&
	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
	}
}

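/* r300_mc_wait_for_idle() below polls register 0x0150, which appears to be
 * the MC status register on these ASICs, until bit 4 (MC idle) is set or
 * rdev->usec_timeout iterations have elapsed. */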
int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(0x0150);
		if (tmp & (1 << 4)) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void r300_gpu_init(struct radeon_device *rdev)
{
	uint32_t gb_tile_config, tmp;

	r100_hdp_reset(rdev);
	/* FIXME: rv380 one pipe? */
	if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
		/* r300,r350 */
		rdev->num_gb_pipes = 2;
	} else {
		/* rv350,rv370,rv380 */
		rdev->num_gb_pipes = 1;
	}
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(0x170C);
	WREG32(0x170C, tmp | (1 << 31));

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
}

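/* GA (geometry assembly) soft reset: the loop below stops the CP queues,
 * pulses a set of RBBM soft-reset bits (the 0x32005 mask) and then watches
 * RBBM_STATUS bits 20 and 26 (reported as "VAP & CP still busy" in the
 * error message) to decide whether an extra GA poke is needed before
 * declaring the reset done and, if needed, re-initializing the CP. */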
int r300_ga_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	for (i = 0; i < rdev->usec_timeout; i++) {
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
		(void)RREG32(RADEON_RBBM_SOFT_RESET);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		/* Wait to prevent race in RBBM_STATUS */
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (tmp & ((1 << 20) | (1 << 26))) {
			DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
			/* GA still busy, soft reset it */
			WREG32(0x429C, 0x200);
			WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
		}
		/* Wait to prevent race in RBBM_STATUS */
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			break;
		}
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
				 tmp);
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
	return -1;
}

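/* Full GPU reset: r300_gpu_reset() samples RBBM_STATUS and only resets the
 * blocks whose busy bits are set (HDP always, RB2D for bits 17/18/27, GA for
 * bits 20/26, CP for bit 16), then re-reads the status to confirm the chip
 * went idle (bit 31 clear). As the comment below notes, the reset order
 * likely matters. */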
int r300_gpu_reset(struct radeon_device *rdev)
{
	uint32_t status;

	/* reset order likely matters */
	status = RREG32(RADEON_RBBM_STATUS);
	/* reset HDP */
	r100_hdp_reset(rdev);
	/* reset rb2d */
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
		r100_rb2d_reset(rdev);
	}
	/* reset GA */
	if (status & ((1 << 20) | (1 << 26))) {
		r300_ga_reset(rdev);
	}
	/* reset CP */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 16)) {
		r100_cp_reset(rdev);
	}
	/* Check if GPU is idle */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 31)) {
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
		return -1;
	}
	DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
	return 0;
}

/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_vram_info(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RADEON_MEM_CNTL);
	if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
		rdev->mc.vram_width = 128;
	} else {
		rdev->mc.vram_width = 64;
	}

	r100_vram_init_sizes(rdev);
}

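/* PCIE lane reconfiguration: rv370_set_pcie_lanes() below maps the requested
 * lane count (1/2/4/8/12/16) to a LINK_WIDTH mask, skips the write if the
 * link is already at that width, then requests RECONFIG_NOW and spins while
 * the register reads back as all-ones, which is how completion is detected
 * here. */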
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	uint32_t link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));

	/* wait for lane set to complete */
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
}

#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
	return 0;
#endif
}

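/*
 * CS (command stream) checker state. The structures below track, per
 * submission, which buffer objects are bound as color buffers, the Z buffer,
 * vertex arrays and textures, together with enough format information
 * (pitch, bytes per pixel, sizes) for r300_cs_track_check() to verify that a
 * draw call cannot read or write outside the objects userspace supplied.
 */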
struct r300_cs_track_cb {
	struct radeon_object	*robj;
	unsigned		pitch;
	unsigned		cpp;
	unsigned		offset;
};

struct r300_cs_track_array {
	struct radeon_object	*robj;
	unsigned		esize;
};

struct r300_cs_track_texture {
	struct radeon_object	*robj;
	unsigned		tex_coord_type;
};

struct r300_cs_track {
	unsigned		vap_vf_cntl;
	unsigned		immd_dwords;
	struct r300_cs_track_array arrays[11];
	struct r300_cs_track_cb cb[4];
	struct r300_cs_track_cb zb;
	struct r300_cs_track_texture textures[16];
};

static inline void r300_cs_track_texture_print(struct r300_cs_track_texture *t)
{
	DRM_ERROR("pitch %d\n", t->pitch);
	DRM_ERROR("width %d\n", t->width);
	DRM_ERROR("height %d\n", t->height);
	DRM_ERROR("num levels %d\n", t->num_levels);
	DRM_ERROR("depth %d\n", t->txdepth);
	DRM_ERROR("bpp %d\n", t->cpp);
	DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
	DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
}

static inline int r300_cs_track_texture_check(struct radeon_device *rdev,
					      struct r300_cs_track *track)
{
	struct radeon_object *robj;
	unsigned long size;
	unsigned u, i, w, h;

	for (u = 0; u < 16; u++) {
		if (!track->textures[u].enabled)
			continue;
		robj = track->textures[u].robj;
		if (robj == NULL) {
			DRM_ERROR("No texture bound to unit %u\n", u);
			return -EINVAL;
		}
		size = 0;
		for (i = 0; i <= track->textures[u].num_levels; i++) {
			if (track->textures[u].use_pitch) {
				w = track->textures[u].pitch / (1 << i);
			} else {
				w = track->textures[u].width / (1 << i);
				if (rdev->family >= CHIP_RV515)
					w |= track->textures[u].width_11;
				if (track->textures[u].roundup_w)
					w = roundup_pow_of_two(w);
			}
			h = track->textures[u].height / (1 << i);
			if (rdev->family >= CHIP_RV515)
				h |= track->textures[u].height_11;
			if (track->textures[u].roundup_h)
				h = roundup_pow_of_two(h);
			size += w * h;
		}
		size *= track->textures[u].cpp;
		switch (track->textures[u].tex_coord_type) {
			size *= (1 << track->textures[u].txdepth);
		default:
			DRM_ERROR("Invalid texture coordinate type %u for unit "
				  "%u\n", track->textures[u].tex_coord_type, u);
			return -EINVAL;
		}
		if (size > radeon_object_size(robj)) {
			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
				  "%lu\n", u, size, radeon_object_size(robj));
			r300_cs_track_texture_print(&track->textures[u]);
			return -EINVAL;
		}
	}
	return 0;
}

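/* The buffer size checks in r300_cs_track_check() below all follow the same
 * pattern: bytes needed = pitch * bytes-per-pixel * maxy (+ offset). As a
 * hypothetical example, a color buffer with a 1024-pixel pitch and 4 bytes
 * per pixel rendered to 768 lines needs 1024 * 4 * 768 = 3 MB, and the bound
 * object must be at least that large. */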
int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track)
{
	unsigned i;
	unsigned long size;
	unsigned prim_walk;
	unsigned nverts;

	for (i = 0; i < track->num_cb; i++) {
		if (track->cb[i].robj == NULL) {
			DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
			return -EINVAL;
		}
		size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
		size += track->cb[i].offset;
		if (size > radeon_object_size(track->cb[i].robj)) {
			DRM_ERROR("[drm] Buffer too small for color buffer %d "
				  "(need %lu have %lu) !\n", i, size,
				  radeon_object_size(track->cb[i].robj));
			DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
				  i, track->cb[i].pitch, track->cb[i].cpp,
				  track->cb[i].offset, track->maxy);
			return -EINVAL;
		}
	}
	if (track->z_enabled) {
		if (track->zb.robj == NULL) {
			DRM_ERROR("[drm] No buffer for z buffer !\n");
			return -EINVAL;
		}
		size = track->zb.pitch * track->zb.cpp * track->maxy;
		size += track->zb.offset;
		if (size > radeon_object_size(track->zb.robj)) {
			DRM_ERROR("[drm] Buffer too small for z buffer "
				  "(need %lu have %lu) !\n", size,
				  radeon_object_size(track->zb.robj));
			return -EINVAL;
		}
	}
	prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
	nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
	switch (prim_walk) {
	case 1:
		for (i = 0; i < track->num_arrays; i++) {
			size = track->arrays[i].esize * track->max_indx * 4;
			if (track->arrays[i].robj == NULL) {
				DRM_ERROR("(PW %u) Vertex array %u no buffer "
					  "bound\n", prim_walk, i);
				return -EINVAL;
			}
			if (size > radeon_object_size(track->arrays[i].robj)) {
				DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
					  "have %lu dwords\n", prim_walk, i,
					  size >> 2,
					  radeon_object_size(track->arrays[i].robj) >> 2);
				DRM_ERROR("Max indices %u\n", track->max_indx);
				return -EINVAL;
			}
		}
		break;
	case 2:
		for (i = 0; i < track->num_arrays; i++) {
			size = track->arrays[i].esize * (nverts - 1) * 4;
			if (track->arrays[i].robj == NULL) {
				DRM_ERROR("(PW %u) Vertex array %u no buffer "
					  "bound\n", prim_walk, i);
				return -EINVAL;
			}
			if (size > radeon_object_size(track->arrays[i].robj)) {
				DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
					  "have %lu dwords\n", prim_walk, i, size >> 2,
					  radeon_object_size(track->arrays[i].robj) >> 2);
				return -EINVAL;
			}
		}
		break;
	case 3:
		size = track->vtx_size * nverts;
		if (size != track->immd_dwords) {
			DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
				  track->immd_dwords, size);
			DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
				  nverts, track->vtx_size);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
			  prim_walk);
		return -EINVAL;
	}
	return r300_cs_track_texture_check(rdev, track);
}

static inline void r300_cs_track_clear(struct r300_cs_track *track)
{
	unsigned i;

	for (i = 0; i < track->num_cb; i++) {
		track->cb[i].robj = NULL;
		track->cb[i].pitch = 8192;
		track->cb[i].cpp = 16;
		track->cb[i].offset = 0;
	}
	track->z_enabled = true;
	track->zb.robj = NULL;
	track->zb.pitch = 8192;
	track->zb.offset = 0;
	track->vtx_size = 0x7F;
	track->immd_dwords = 0xFFFFFFFFUL;
	track->num_arrays = 11;
	track->max_indx = 0x00FFFFFFUL;
	for (i = 0; i < track->num_arrays; i++) {
		track->arrays[i].robj = NULL;
		track->arrays[i].esize = 0x7F;
	}
	for (i = 0; i < 16; i++) {
		track->textures[i].pitch = 16536;
		track->textures[i].width = 16536;
		track->textures[i].height = 16536;
		track->textures[i].width_11 = 1 << 11;
		track->textures[i].height_11 = 1 << 11;
		track->textures[i].num_levels = 12;
		track->textures[i].txdepth = 16;
		track->textures[i].cpp = 64;
		track->textures[i].tex_coord_type = 1;
		track->textures[i].robj = NULL;
		/* CS IB emission code makes sure texture units are disabled */
		track->textures[i].enabled = false;
		track->textures[i].roundup_w = true;
		track->textures[i].roundup_h = true;
	}
}

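/* r300_reg_safe_bm is indexed by register offset: each 32-bit word covers
 * 128 bytes of register space (32 register dwords) and each bit corresponds
 * to one register. r100_cs_parse_packet0() consults this table to decide
 * which packet0 register writes must go through r300_packet0_check()
 * (relocation patching or rejection) and which can be passed through
 * untouched. */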
static const unsigned r300_reg_safe_bm[159] = {
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
	0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
	0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
	0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
	0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
	0x00000000, 0x0000C100, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0003FC01, 0xFFFFFFF8, 0xFE800B19,
};

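/* r300_packet0_check() is the per-register callback used with the safe
 * bitmap above: for registers that carry GPU addresses it rewrites the IB
 * dword with the relocated buffer offset, and for registers that describe
 * surfaces (pitches, formats, scissor, VAP state) it records the values in
 * the r300_cs_track so the draw-time checks can use them. Anything it does
 * not recognize is rejected as a forbidden register. */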
static int r300_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	struct r300_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp, tile_flags = 0;
	unsigned i;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	track = (struct r300_cs_track *)p->track;
	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		tmp = ib_chunk->kdata[idx] & 0x003fffff;
		tmp += (((u32)reloc->lobj.gpu_offset) >> 10);

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= RADEON_DST_TILE_MACRO;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			if (reg == RADEON_SRC_PITCH_OFFSET) {
				DRM_ERROR("Cannot src blit from microtiled surface\n");
				r100_cs_dump_packet(p, pkt);
				return -EINVAL;
			}
			tile_flags |= RADEON_DST_TILE_MICRO;
		}
		tmp |= tile_flags;
		ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_ZB_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		i = (reg - R300_TX_OFFSET_0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
		break;
	/* Tracked registers */
		/* VAP_VF_CNTL */
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		/* VAP_VTX_SIZE */
		track->vtx_size = ib_chunk->kdata[idx] & 0x7F;
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL;
		/* SC_SCISSOR1 */
		track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
			track->maxy -= 1440;
		}
		/* RB3D_CCTL */
		track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1;
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_COLOR_TILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_COLOR_MICROTILE_ENABLE;

		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
		switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
			track->cb[i].cpp = 1;
			track->cb[i].cpp = 2;
			track->cb[i].cpp = 4;
			track->cb[i].cpp = 8;
			track->cb[i].cpp = 16;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((ib_chunk->kdata[idx] >> 21) & 0xF));
			return -EINVAL;
		}
		break;
		/* ZB_CNTL */
		if (ib_chunk->kdata[idx] & 2) {
			track->z_enabled = true;
		} else {
			track->z_enabled = false;
		}
		break;
		/* ZB_FORMAT */
		switch ((ib_chunk->kdata[idx] & 0xF)) {
		default:
			DRM_ERROR("Invalid z buffer format (%d) !\n",
				  (ib_chunk->kdata[idx] & 0xF));
			return -EINVAL;
		}
		break;
		/* ZB_DEPTHPITCH */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_DEPTHMICROTILE_TILED;

		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
		break;
		/* TX_ENABLE */
		for (i = 0; i < 16; i++) {
			bool enabled;

			enabled = !!(ib_chunk->kdata[idx] & (1 << i));
			track->textures[i].enabled = enabled;
		}
		break;
		/* TX_FORMAT1_[0-15] */
		i = (reg - 0x44C0) >> 2;
		tmp = (ib_chunk->kdata[idx] >> 25) & 0x3;
		track->textures[i].tex_coord_type = tmp;
		switch ((ib_chunk->kdata[idx] & 0x1F)) {
			track->textures[i].cpp = 1;
			track->textures[i].cpp = 2;
			track->textures[i].cpp = 4;
			track->textures[i].cpp = 8;
			track->textures[i].cpp = 16;
		default:
			DRM_ERROR("Invalid texture format %u\n",
				  (ib_chunk->kdata[idx] & 0x1F));
			return -EINVAL;
		}
		break;
		/* TX_FILTER0_[0-15] */
		i = (reg - 0x4400) >> 2;
		tmp = ib_chunk->kdata[idx] & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_w = false;
		}
		tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_h = false;
		}
		break;
		/* TX_FORMAT2_[0-15] */
		i = (reg - 0x4500) >> 2;
		tmp = ib_chunk->kdata[idx] & 0x3FFF;
		track->textures[i].pitch = tmp + 1;
		if (p->rdev->family >= CHIP_RV515) {
			tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11;
			track->textures[i].width_11 = tmp;
			tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11;
			track->textures[i].height_11 = tmp;
		}
		break;
		/* TX_FORMAT0_[0-15] */
		i = (reg - 0x4480) >> 2;
		tmp = ib_chunk->kdata[idx] & 0x7FF;
		track->textures[i].width = tmp + 1;
		tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF;
		track->textures[i].height = tmp + 1;
		tmp = (ib_chunk->kdata[idx] >> 26) & 0xF;
		track->textures[i].num_levels = tmp;
		tmp = ib_chunk->kdata[idx] & (1 << 31);
		track->textures[i].use_pitch = !!tmp;
		tmp = (ib_chunk->kdata[idx] >> 22) & 0xF;
		track->textures[i].txdepth = tmp;
		break;
	case R300_ZB_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
		/* valid register only on RV530 */
		if (p->rdev->family == CHIP_RV530)
			break;
		/* fallthrough do not move */
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

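/* r300_packet3_check() validates the type-3 packets userspace may emit:
 * vertex-array pointers and index buffers get their addresses relocated,
 * and every draw variant (VBUF/INDX/IMMD and their *_2 forms) snapshots
 * VAP_VF_CNTL into the tracker and runs r300_cs_track_check() before the
 * draw is allowed through. */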
static int r300_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	struct r300_cs_track *track;
	volatile uint32_t *ib;
	unsigned idx;
	unsigned i, c;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	track = (struct r300_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		c = ib_chunk->kdata[idx++] & 0x1F;
		track->num_arrays = c;
		for (i = 0; i < (c - 1); i += 2, idx += 3) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 0].robj = reloc->robj;
			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
			track->arrays[i + 0].esize &= 0x7F;
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 1].robj = reloc->robj;
			track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
			track->arrays[i + 1].esize &= 0x7F;
		}
		if (c & 1) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 0].robj = reloc->robj;
			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
			track->arrays[i + 0].esize &= 0x7F;
		}
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_IMMD:
		/* Number of dwords is vtx_size * (num_vertices - 1),
		 * PRIM_WALK must be equal to 3, vertex data is embedded
		 * in the command stream */
		if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = ib_chunk->kdata[idx+1];
		track->immd_dwords = pkt->count - 1;
		r = r300_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_IMMD_2:
		/* Number of dwords is vtx_size * (num_vertices - 1),
		 * PRIM_WALK must be equal to 3, vertex data is embedded
		 * in the command stream */
		if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		track->immd_dwords = pkt->count;
		r = r300_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
		r = r300_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		r = r300_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
		r = r300_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		r = r300_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

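/* r300_cs_parse() is the top-level loop: it walks the IB chunk packet by
 * packet, routes type-0 packets through r100_cs_parse_packet0() with the
 * r300 safe-register bitmap and r300_packet0_check(), and type-3 packets
 * through r300_packet3_check(), until the chunk is exhausted. */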
int r300_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r300_cs_track track;
	int r;

	r300_cs_track_clear(&track);
	p->track = &track;
	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r100_cs_parse_packet0(p, &pkt,
						  p->rdev->config.r300.reg_safe_bm,
						  p->rdev->config.r300.reg_safe_bm_size,
						  &r300_packet0_check);
			break;
		case PACKET_TYPE3:
			r = r300_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}

int r300_init(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
	return 0;
}