drivers/gpu/drm/radeon/r100.c
drm/radeon/kms: wait for cp idle before stopping it.
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "r100d.h"

#include <linux/firmware.h>
#include <linux/platform_device.h>

#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

/* Firmware Names */
#define FIRMWARE_R100		"radeon/R100_cp.bin"
#define FIRMWARE_R200		"radeon/R200_cp.bin"
#define FIRMWARE_R300		"radeon/R300_cp.bin"
#define FIRMWARE_R420		"radeon/R420_cp.bin"
#define FIRMWARE_RS690		"radeon/RS690_cp.bin"
#define FIRMWARE_RS600		"radeon/RS600_cp.bin"
#define FIRMWARE_R520		"radeon/R520_cp.bin"

MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
MODULE_FIRMWARE(FIRMWARE_R300);
MODULE_FIRMWARE(FIRMWARE_R420);
MODULE_FIRMWARE(FIRMWARE_RS690);
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);

#include "r100_track.h"

/* This file gathers functions specific to:
 * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
 *
 * Some of these functions might be used by newer ASICs.
 */
int r200_init(struct radeon_device *rdev);
void r100_hdp_reset(struct radeon_device *rdev);
void r100_gpu_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_mc_wait_for_idle(struct radeon_device *rdev);
void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);

/*
 * PCI GART
 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: is there anything we can do here? */
	/* It seems the hw only caches one entry, so we should discard
	 * that entry; otherwise, if the first GPU GART read hits it,
	 * the access could end up at the wrong address. */
}

int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	if (rdev->gart.table.ram.ptr == NULL) {
		rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
		r = radeon_gart_table_ram_alloc(rdev);
		if (r) {
			return r;
		}
	}
	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translate */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	WREG32(RADEON_AIC_HI_ADDR, tmp);
	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
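
/* Example (illustrative, not part of the original source): with
 * gtt_location = 0 and a 32MB gtt_size, the window programmed above is
 * AIC_LO_ADDR = 0x00000000 and AIC_HI_ADDR = 0x01FFFFFF.  GPU accesses
 * inside that range go through the PCI GART page table; anything outside
 * is discarded per the AIC_CNTL setting.
 */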

void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}

int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
	return 0;
}
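
/* Example (illustrative): each GART entry is a single little-endian 32-bit
 * bus address, one per GPU page.  Mapping page 5 to bus address 0x12345000
 * simply stores cpu_to_le32(0x12345000) in table.ram.ptr[5].  Since
 * lower_32_bits() drops the upper half of addr, pages being mapped must
 * have bus addresses below 4GB.
 */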

int r100_gart_enable(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_AGP) {
		r100_pci_gart_disable(rdev);
		return 0;
	}
	return r100_pci_gart_enable(rdev);
}


/*
 * MC
 */
void r100_mc_disable_clients(struct radeon_device *rdev)
{
	uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;

	/* FIXME: is this function correct for rs100,rs200,rs300 ? */
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	/* stop display and memory access */
	ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
	WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);

	r100_gpu_wait_for_vsync(rdev);

	WREG32(RADEON_CRTC_GEN_CNTL,
	       (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
	       RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);

	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);

		r100_gpu_wait_for_vsync2(rdev);
		WREG32(RADEON_CRTC2_GEN_CNTL,
		       (crtc2_gen_cntl &
			~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
		       RADEON_CRTC2_DISP_REQ_EN_B);
	}

	udelay(500);
}

void r100_mc_setup(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
	}
	/* Write VRAM size in case we are limiting it */
	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
	/* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM:
	 * if the aperture is 64MB but we only have 32MB VRAM,
	 * we report 32MB VRAM but still have to set MC_FB_LOCATION
	 * to 64MB, otherwise the gpu accidentally dies */
	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32(RADEON_MC_FB_LOCATION, tmp);

	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);

	if (rdev->flags & RADEON_IS_AGP) {
		tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
		tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
		tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
		WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
	} else {
		WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(RADEON_AGP_BASE, 0);
	}

	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
	tmp |= (7 << 28);
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
}
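
/* Worked example (illustrative, assuming the usual MC_FB_LOCATION packing
 * with START in the low 16 bits and TOP in the high 16 bits, in 64KB
 * units): with vram_location = 0 and mc_vram_size = 64MB, tmp first
 * becomes 0x03FFFFFF, so MC_FB_TOP = 0x03FF, MC_FB_START = 0x0000, and
 * the register is programmed to 0x03FF0000.
 */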

int r100_mc_init(struct radeon_device *rdev)
{
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}

	r100_gpu_init(rdev);
	/* Disable gart, which also disables out-of-gart access */
	r100_pci_gart_disable(rdev);

	/* Setup GPU memory space */
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	r100_mc_disable_clients(rdev);
	if (r100_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	r100_mc_setup(rdev);
	return 0;
}

void r100_mc_fini(struct radeon_device *rdev)
{
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
	radeon_gart_fini(rdev);
}


/*
 * Interrupts
 */
int r100_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;

	if (rdev->irq.sw_int) {
		tmp |= RADEON_SW_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		tmp |= RADEON_CRTC_VBLANK_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		tmp |= RADEON_CRTC2_VBLANK_MASK;
	}
	WREG32(RADEON_GEN_INT_CNTL, tmp);
	return 0;
}

static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
	uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
		RADEON_CRTC2_VBLANK_STAT;

	if (irqs) {
		WREG32(RADEON_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

int r100_irq_process(struct radeon_device *rdev)
{
	uint32_t status;

	status = r100_irq_ack(rdev);
	if (!status) {
		return IRQ_NONE;
	}
	while (status) {
		/* SW interrupt */
		if (status & RADEON_SW_INT_TEST) {
			radeon_fence_process(rdev);
		}
		/* Vertical blank interrupts */
		if (status & RADEON_CRTC_VBLANK_STAT) {
			drm_handle_vblank(rdev->ddev, 0);
		}
		if (status & RADEON_CRTC2_VBLANK_STAT) {
			drm_handle_vblank(rdev->ddev, 1);
		}
		status = r100_irq_ack(rdev);
	}
	return IRQ_HANDLED;
}

u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(RADEON_CRTC_CRNT_FRAME);
	else
		return RREG32(RADEON_CRTC2_CRNT_FRAME);
}


/*
 * Fence emission
 */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are ib schedule and buffer move) */
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}


/*
 * Writeback
 */
int r100_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_object_create(rdev, NULL, 4096,
					 true,
					 RADEON_GEM_DOMAIN_GTT,
					 false, &rdev->wb.wb_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_pin(rdev->wb.wb_obj,
				      RADEON_GEM_DOMAIN_GTT,
				      &rdev->wb.gpu_addr);
		if (r) {
			DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		if (r) {
			DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
			return r;
		}
	}
	WREG32(RADEON_SCRATCH_ADDR, rdev->wb.gpu_addr);
	WREG32(RADEON_CP_RB_RPTR_ADDR, rdev->wb.gpu_addr + 1024);
	WREG32(RADEON_SCRATCH_UMSK, 0xff);
	return 0;
}

void r100_wb_fini(struct radeon_device *rdev)
{
	if (rdev->wb.wb_obj) {
		radeon_object_kunmap(rdev->wb.wb_obj);
		radeon_object_unpin(rdev->wb.wb_obj);
		radeon_object_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_pages,
		   struct radeon_fence *fence)
{
	uint32_t cur_pages;
	uint32_t stride_bytes = PAGE_SIZE;
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* radeon is limited to a 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is expressed in units of 64 bytes */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
	num_loops = DIV_ROUND_UP(num_pages, 8191);
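	/* Worked example (illustrative): with a 4096-byte PAGE_SIZE the
	 * values above come out to pitch = 4096 / 64 = 64 and
	 * stride_pixels = 4096 / 4 = 1024 (32-bit pixels per row), and a
	 * 64MB move (16384 pages) needs num_loops = ceil(16384 / 8191) = 2
	 * blit packets.
	 */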

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
	r = radeon_ring_lock(rdev, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return -EINVAL;
	}
	while (num_pages > 0) {
		cur_pages = num_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_pages -= cur_pages;

		/* pages are in Y direction - height
		   page width in X direction - width */
		radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(rdev,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, 0);
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
	}
	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}


/*
 * CP
 */
static int r100_cp_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(R_000E40_RBBM_STATUS);
		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
			return 0;
		}
		udelay(1);
	}
	return -1;
}

void r100_ring_start(struct radeon_device *rdev)
{
	int r;

	r = radeon_ring_lock(rdev, 2);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev);
}


/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *fw_name = NULL;
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}
	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		fw_name = FIRMWARE_R100;
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		fw_name = FIRMWARE_R200;
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		fw_name = FIRMWARE_R300;
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		fw_name = FIRMWARE_R420;
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		fw_name = FIRMWARE_RS690;
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		fw_name = FIRMWARE_RS600;
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		fw_name = FIRMWARE_R520;
	}

	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	platform_device_unregister(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
		       fw_name);
	} else if (rdev->me_fw->size % 8) {
		printk(KERN_ERR
		       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
	}
	return err;
}

static void r100_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i, size;

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	if (rdev->me_fw) {
		size = rdev->me_fw->size / 4;
		fw_data = (const __be32 *)&rdev->me_fw->data[0];
		WREG32(RADEON_CP_ME_RAM_ADDR, 0);
		for (i = 0; i < size; i += 2) {
			WREG32(RADEON_CP_ME_RAM_DATAH,
			       be32_to_cpup(&fw_data[i]));
			WREG32(RADEON_CP_ME_RAM_DATAL,
			       be32_to_cpup(&fw_data[i + 1]));
		}
	}
}

int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
	unsigned rb_bufsz;
	unsigned rb_blksz;
	unsigned max_fetch;
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;
	uint32_t tmp;
	int r;

	if (r100_debugfs_cp_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for CP !\n");
	}
	/* Reset CP */
	tmp = RREG32(RADEON_CP_CSQ_STAT);
	if ((tmp & (1 << 31))) {
		DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
		mdelay(2);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
		mdelay(2);
		tmp = RREG32(RADEON_CP_CSQ_STAT);
		if ((tmp & (1 << 31))) {
			DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
		}
	} else {
		DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
	}

	if (!rdev->me_fw) {
		r = r100_cp_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
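	/* Sizing example (illustrative): asking for a 1MB ring gives
	 * rb_bufsz = drm_order(1048576 / 8) = 17, and the aligned size is
	 * (1 << 18) * 4 = 1048576 bytes again, so power-of-two requests
	 * round-trip unchanged.
	 */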
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring_size);
	if (r) {
		return r;
	}
	/* Each time the cp has read 1024 bytes (256 dwords), update
	 * the rptr copy in system ram */
	rb_blksz = 9;
	/* cp will fetch 128 bytes (32 dwords) at a time */
	max_fetch = 1;
	rdev->cp.align_mask = 16 - 1;
	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force a CP_RB_WPTR write if it is written more than once before
	 * the delay expires
	 */
	pre_write_limit = 0;
	/* Set up the cp cache like this (cache size is 96 dwords):
	 *	RING		0  to 15
	 *	INDIRECT1	16 to 79
	 *	INDIRECT2	80 to 95
	 * So the ring cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)),
	 * the indirect1 cache size is 64 dwords (> (2 * max_fetch = 2 * 4 dwords)),
	 * the indirect2 cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)).
	 * The idea is that most of the gpu commands will come through the
	 * indirect1 buffer, so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
	/* cp setup */
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	WREG32(RADEON_CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       RADEON_BUF_SWAP_32BIT |
#endif
	       REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch) |
	       RADEON_RB_NO_UPDATE);
	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
	WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
	/* Force read & write ptr to 0 */
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	udelay(10);
	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
	/* Set cp mode to bus mastering & enable cp */
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(0x718, 0);
	WREG32(0x744, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
	radeon_ring_start(rdev);
	r = radeon_ring_test(rdev);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
		return r;
	}
	rdev->cp.ready = true;
	return 0;
}

void r100_cp_fini(struct radeon_device *rdev)
{
	if (r100_cp_wait_for_idle(rdev)) {
		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
	}
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	radeon_ring_fini(rdev);
	DRM_INFO("radeon: cp finalized\n");
}

void r100_cp_disable(struct radeon_device *rdev)
{
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}

int r100_cp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
	(void)RREG32(RADEON_RBBM_SOFT_RESET);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 16))) {
			DRM_INFO("CP reset succeeded (RBBM_STATUS=0x%08X)\n",
				 tmp);
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}

void r100_cp_commit(struct radeon_device *rdev)
{
	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(RADEON_CP_RB_WPTR);
}


/*
 * CS functions
 */
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}
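
/* Indexing example (illustrative): each dword of the safe-register bitmap
 * covers 32 registers, i.e. 128 bytes of register space, hence
 * j = reg >> 7 and m = 1 << ((reg >> 2) & 31).  For a register at byte
 * offset 0x15c0 this gives j = 43 and bit 16 of auth[43].
 */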

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
	}
}

/**
 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * type is unknown.
 **/
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = ib_chunk->kdata[idx];
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
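
/* Header layout assumed by the macros above (as commonly defined for this
 * generation; exact masks live in r100d.h): bits 31:30 hold the packet
 * type, bits 29:16 hold the count (body dwords minus one), and for
 * PACKET0 the low bits hold the starting register dword offset, so a
 * PACKET0 writing one dword to register 0x1720 encodes roughly as
 * (0x1720 >> 2) in the low bits with count = 0.
 */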

/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser:	parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched-off crtc and nulls out the
 * wait in that case.
 */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ib_chunk;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, waitreloc;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg;

	ib_chunk = &p->chunks[p->chunk_ib_idx];

	/* parse the wait until */
	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
	if (r)
		return r;

	/* check it's a wait until and only 1 count */
	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
	    waitreloc.count != 0) {
		DRM_ERROR("vline wait had illegal wait until segment\n");
		r = -EINVAL;
		return r;
	}

	if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) {
		DRM_ERROR("vline wait had illegal wait until\n");
		r = -EINVAL;
		return r;
	}

	/* jump over the NOP */
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += waitreloc.count;
	p->idx += p3reloc.count;

	header = ib_chunk->kdata[h_idx];
	crtc_id = ib_chunk->kdata[h_idx + 5];
	reg = ib_chunk->kdata[h_idx] >> 2;
	mutex_lock(&p->rdev->ddev->mode_config.mutex);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		r = -EINVAL;
		goto out;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the wait until */
		ib_chunk->kdata[h_idx + 2] = PACKET2(0);
		ib_chunk->kdata[h_idx + 3] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= R300_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		case RADEON_CRTC_GUI_TRIG_VLINE:
			header &= R300_CP_PACKET0_REG_MASK;
			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			r = -EINVAL;
			goto out;
		}
		ib_chunk->kdata[h_idx] = header;
		ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
	}
out:
	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
	return r;
}

/**
 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @parser:		parser structure holding parsing context.
 * @data:		pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:		reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation
 * and compute the GPU offset using the provided start.
 **/
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = ib_chunk->kdata[p3reloc.idx + 1];
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}
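
/* Example (illustrative): the PACKET3 NOP body dword holds a dword index
 * into the relocation chunk; since a reloc entry is assumed to be 4 dwords
 * (see the FIXME above), a body value of 8 selects p->relocs_ptr[2].
 */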

static int r100_get_vtx_size(uint32_t vtx_fmt)
{
	int vtx_size;

	vtx_size = 2;
	/* ordered according to bits in spec */
	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
		vtx_size++;
	/* blend weight */
	if (vtx_fmt & (0x7 << 15))
		vtx_size += (vtx_fmt >> 15) & 0x7;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
		vtx_size++;
	return vtx_size;
}
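
/* Example (illustrative): a vertex format with RADEON_SE_VTX_FMT_FPCOLOR
 * and RADEON_SE_VTX_FMT_ST0 set yields vtx_size = 2 + 3 + 2 = 7 dwords
 * per vertex.
 */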

static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp;
	int r;
	int i, face;
	u32 tile_flags = 0;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	track = (struct r100_cs_track *)p->track;

	switch (reg) {
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
		/* FIXME: only allow PACKET3 blit? easier to check for out of
		 * range access */
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case RADEON_RB3D_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_RB3D_COLOROFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[0].robj = reloc->robj;
		track->cb[0].offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_TXOFFSET_0:
	case RADEON_PP_TXOFFSET_1:
	case RADEON_PP_TXOFFSET_2:
		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T0_0:
	case RADEON_PP_CUBIC_OFFSET_T0_1:
	case RADEON_PP_CUBIC_OFFSET_T0_2:
	case RADEON_PP_CUBIC_OFFSET_T0_3:
	case RADEON_PP_CUBIC_OFFSET_T0_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		track->textures[0].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T1_0:
	case RADEON_PP_CUBIC_OFFSET_T1_1:
	case RADEON_PP_CUBIC_OFFSET_T1_2:
	case RADEON_PP_CUBIC_OFFSET_T1_3:
	case RADEON_PP_CUBIC_OFFSET_T1_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		track->textures[1].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T2_0:
	case RADEON_PP_CUBIC_OFFSET_T2_1:
	case RADEON_PP_CUBIC_OFFSET_T2_2:
	case RADEON_PP_CUBIC_OFFSET_T2_3:
	case RADEON_PP_CUBIC_OFFSET_T2_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		track->textures[2].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_RE_WIDTH_HEIGHT:
		track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF);
		break;
	case RADEON_RB3D_COLORPITCH:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= RADEON_COLOR_TILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK;
		break;
	case RADEON_RB3D_DEPTHPITCH:
		track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK;
		break;
	case RADEON_RB3D_CNTL:
		switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
		case 7:
		case 8:
		case 9:
		case 11:
		case 12:
			track->cb[0].cpp = 1;
			break;
		case 3:
		case 4:
		case 15:
			track->cb[0].cpp = 2;
			break;
		case 6:
			track->cb[0].cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
			return -EINVAL;
		}
		track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE);
		break;
	case RADEON_RB3D_ZSTENCILCNTL:
		switch (ib_chunk->kdata[idx] & 0xf) {
		case 0:
			track->zb.cpp = 2;
			break;
		case 2:
		case 3:
		case 4:
		case 5:
		case 9:
		case 11:
			track->zb.cpp = 4;
			break;
		default:
			break;
		}
		break;
	case RADEON_RB3D_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_CNTL:
		{
			uint32_t temp = ib_chunk->kdata[idx] >> 4;
			for (i = 0; i < track->num_texture; i++)
				track->textures[i].enabled = !!(temp & (1 << i));
		}
		break;
	case RADEON_SE_VF_CNTL:
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		break;
	case RADEON_SE_VTX_FMT:
		track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]);
		break;
	case RADEON_PP_TEX_SIZE_0:
	case RADEON_PP_TEX_SIZE_1:
	case RADEON_PP_TEX_SIZE_2:
		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
		track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1;
		track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
		break;
	case RADEON_PP_TEX_PITCH_0:
	case RADEON_PP_TEX_PITCH_1:
	case RADEON_PP_TEX_PITCH_2:
		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
		track->textures[i].pitch = ib_chunk->kdata[idx] + 32;
		break;
	case RADEON_PP_TXFILTER_0:
	case RADEON_PP_TXFILTER_1:
	case RADEON_PP_TXFILTER_2:
		i = (reg - RADEON_PP_TXFILTER_0) / 24;
		track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK)
						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
		tmp = (ib_chunk->kdata[idx] >> 23) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_w = false;
		tmp = (ib_chunk->kdata[idx] >> 27) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_h = false;
		break;
	case RADEON_PP_TXFORMAT_0:
	case RADEON_PP_TXFORMAT_1:
	case RADEON_PP_TXFORMAT_2:
		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
		if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) {
			track->textures[i].use_pitch = 1;
		} else {
			track->textures[i].use_pitch = 0;
			track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
			track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
		}
		if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
			track->textures[i].tex_coord_type = 2;
		switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) {
		case RADEON_TXFORMAT_I8:
		case RADEON_TXFORMAT_RGB332:
		case RADEON_TXFORMAT_Y8:
			track->textures[i].cpp = 1;
			break;
		case RADEON_TXFORMAT_AI88:
		case RADEON_TXFORMAT_ARGB1555:
		case RADEON_TXFORMAT_RGB565:
		case RADEON_TXFORMAT_ARGB4444:
		case RADEON_TXFORMAT_VYUY422:
		case RADEON_TXFORMAT_YVYU422:
		case RADEON_TXFORMAT_DXT1:
		case RADEON_TXFORMAT_SHADOW16:
		case RADEON_TXFORMAT_LDUDV655:
		case RADEON_TXFORMAT_DUDV88:
			track->textures[i].cpp = 2;
			break;
		case RADEON_TXFORMAT_ARGB8888:
		case RADEON_TXFORMAT_RGBA8888:
		case RADEON_TXFORMAT_DXT23:
		case RADEON_TXFORMAT_DXT45:
		case RADEON_TXFORMAT_SHADOW32:
		case RADEON_TXFORMAT_LDUDUV8888:
			track->textures[i].cpp = 4;
			break;
		}
		track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf);
		track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf);
		break;
	case RADEON_PP_CUBIC_FACES_0:
	case RADEON_PP_CUBIC_FACES_1:
	case RADEON_PP_CUBIC_FACES_2:
		tmp = ib_chunk->kdata[idx];
		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
		for (face = 0; face < 4; face++) {
			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					 struct radeon_cs_packet *pkt,
					 struct radeon_object *robj)
{
	struct radeon_cs_chunk *ib_chunk;
	unsigned idx;

	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) {
		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
			  "(need %u have %lu) !\n",
			  ib_chunk->kdata[idx+2] + 1,
			  radeon_object_size(robj));
		return -EINVAL;
	}
	return 0;
}

static int r100_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	unsigned idx;
	unsigned i, c;
	volatile uint32_t *ib;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		c = ib_chunk->kdata[idx++];
		track->num_arrays = c;
		for (i = 0; i < (c - 1); i += 2, idx += 3) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 0].robj = reloc->robj;
			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
			track->arrays[i + 0].esize &= 0x7F;
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 1].robj = reloc->robj;
			track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
			track->arrays[i + 1].esize &= 0x7F;
		}
		if (c & 1) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 0].robj = reloc->robj;
			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
			track->arrays[i + 0].esize &= 0x7F;
		}
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	case 0x23:
		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		track->num_arrays = 1;
		track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]);

		track->arrays[0].robj = reloc->robj;
		track->arrays[0].esize = track->vtx_size;

		track->max_indx = ib_chunk->kdata[idx+1];

		track->vap_vf_cntl = ib_chunk->kdata[idx+3];
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
	case PACKET3_3D_DRAW_IMMD:
		if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = ib_chunk->kdata[idx+1];
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_IMMD_2:
		if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r100_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track track;
	int r;

	r100_cs_track_clear(p->rdev, &track);
	p->track = &track;
	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			if (p->rdev->family >= CHIP_R200)
				r = r100_cs_parse_packet0(p, &pkt,
							  p->rdev->config.r100.reg_safe_bm,
							  p->rdev->config.r100.reg_safe_bm_size,
							  &r200_packet0_check);
			else
				r = r100_cs_parse_packet0(p, &pkt,
							  p->rdev->config.r100.reg_safe_bm,
							  p->rdev->config.r100.reg_safe_bm_size,
							  &r100_packet0_check);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r100_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n",
				  pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}


/*
 * Global GPU functions
 */
void r100_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
	}

	if (rdev->family == CHIP_RV100 ||
	    rdev->family == CHIP_RS100 ||
	    rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
	}
}

/* Wait for vertical sync on primary CRTC */
void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
{
	uint32_t crtc_gen_cntl, tmp;
	int i;

	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
	    !(crtc_gen_cntl & RADEON_CRTC_EN)) {
		return;
	}
	/* Clear the CRTC_VBLANK_SAVE bit */
	WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_CRTC_STATUS);
		if (tmp & RADEON_CRTC_VBLANK_SAVE) {
			return;
		}
		DRM_UDELAY(1);
	}
}

/* Wait for vertical sync on secondary CRTC */
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
{
	uint32_t crtc2_gen_cntl, tmp;
	int i;

	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
	if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
	    !(crtc2_gen_cntl & RADEON_CRTC2_EN))
		return;

	/* Clear the CRTC_VBLANK_SAVE bit */
	WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_CRTC2_STATUS);
		if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
			return;
		}
		DRM_UDELAY(1);
	}
}

int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
		if (tmp >= n) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

int r100_gui_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
		printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
		       " Bad things might happen.\n");
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 31))) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

int r100_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(0x0150);
		if (tmp & (1 << 2)) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void r100_gpu_init(struct radeon_device *rdev)
{
	/* TODO: anything to do here? pipes? */
	r100_hdp_reset(rdev);
}

void r100_hdp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
	tmp |= (7 << 28);
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
}

int r100_rb2d_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
	(void)RREG32(RADEON_RBBM_SOFT_RESET);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 26))) {
			DRM_INFO("RB2D reset succeeded (RBBM_STATUS=0x%08X)\n",
				 tmp);
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}

int r100_gpu_reset(struct radeon_device *rdev)
{
	uint32_t status;

	/* reset order likely matters */
	status = RREG32(RADEON_RBBM_STATUS);
	/* reset HDP */
	r100_hdp_reset(rdev);
	/* reset rb2d */
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
		r100_rb2d_reset(rdev);
	}
	/* TODO: reset 3D engine */
	/* reset CP */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 16)) {
		r100_cp_reset(rdev);
	}
	/* Check if GPU is idle */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 31)) {
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
		return -1;
	}
	DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
	return 0;
}


/*
 * VRAM info
 */
static void r100_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;

	rdev->mc.vram_is_ddr = false;
	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.vram_is_ddr = true;
	else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
		rdev->mc.vram_is_ddr = true;
	if ((rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		tmp = RREG32(RADEON_MEM_CNTL);
		if (tmp & RV100_HALF_MODE) {
			rdev->mc.vram_width = 32;
		} else {
			rdev->mc.vram_width = 64;
		}
		if (rdev->flags & RADEON_SINGLE_CRTC) {
			rdev->mc.vram_width /= 4;
			rdev->mc.vram_is_ddr = true;
		}
	} else if (rdev->family <= CHIP_RV280) {
		tmp = RREG32(RADEON_MEM_CNTL);
		if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
			rdev->mc.vram_width = 128;
		} else {
			rdev->mc.vram_width = 64;
		}
	} else {
		/* newer IGPs */
		rdev->mc.vram_width = 128;
	}
}
1847
1848static u32 r100_get_accessible_vram(struct radeon_device *rdev)
1849{
1850 u32 aper_size;
1851 u8 byte;
1852
1853 aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
1854
1855 /* Set HDP_APER_CNTL only on cards that are known not to be broken,
 1856	 * that is, cards with the 2nd generation multifunction PCI interface
1857 */
1858 if (rdev->family == CHIP_RV280 ||
1859 rdev->family >= CHIP_RV350) {
1860 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
1861 ~RADEON_HDP_APER_CNTL);
1862 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
1863 return aper_size * 2;
1864 }
1865
1866 /* Older cards have all sorts of funny issues to deal with. First
1867 * check if it's a multifunction card by reading the PCI config
1868 * header type... Limit those to one aperture size
1869 */
1870 pci_read_config_byte(rdev->pdev, 0xe, &byte);
1871 if (byte & 0x80) {
1872 DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
1873 DRM_INFO("Limiting VRAM to one aperture\n");
1874 return aper_size;
1875 }
1876
1877 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS
 1878	 * has set it up. We don't write it ourselves as that's broken on some ASICs,
 1879	 * but we expect the BIOS to have done the right thing (which might be too optimistic...)
1880 */
1881 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
1882 return aper_size * 2;
1883 return aper_size;
1884}
1885
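/* For IGPs the framebuffer is stolen system RAM described by NB_TOM:
 * bits 15:0 hold the start and bits 31:16 the end, in 64KB units.
 * As a hypothetical example, NB_TOM = 0x03ff0000 decodes to a 64MB
 * carve-out at offset 0, since ((0x03ff - 0x0000) + 1) << 16 = 64MB.
 */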
1886void r100_vram_init_sizes(struct radeon_device *rdev)
1887{
1888 u64 config_aper_size;
1889 u32 accessible;
1890
1891 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
1892
1893 if (rdev->flags & RADEON_IS_IGP) {
1894 uint32_t tom;
1895 /* read NB_TOM to get the amount of ram stolen for the GPU */
1896 tom = RREG32(RADEON_NB_TOM);
 1897	rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
1898 /* for IGPs we need to keep VRAM where it was put by the BIOS */
1899 rdev->mc.vram_location = (tom & 0xffff) << 16;
1900 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
1901 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
 1902	} else {
 1903	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
 1904	/* Some production boards of the M6 will report 0
 1905	 * if they have 8 MB
 1906	 */
1907 if (rdev->mc.real_vram_size == 0) {
1908 rdev->mc.real_vram_size = 8192 * 1024;
1909 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
 1910	}
1911 /* let driver place VRAM */
1912 rdev->mc.vram_location = 0xFFFFFFFFUL;
 1913	/* Fix for RN50, M6, M7 with 8/16/32(??) MB of VRAM -
 1914	 * Novell bug 204882, along with lots of Ubuntu ones */
1915 if (config_aper_size > rdev->mc.real_vram_size)
1916 rdev->mc.mc_vram_size = config_aper_size;
1917 else
1918 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
1919 }
1920
1921 /* work out accessible VRAM */
1922 accessible = r100_get_accessible_vram(rdev);
1923
1924 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
1925 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
1926
1927 if (accessible > rdev->mc.aper_size)
1928 accessible = rdev->mc.aper_size;
1929
1930 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
1931 rdev->mc.mc_vram_size = rdev->mc.aper_size;
1932
1933 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
1934 rdev->mc.real_vram_size = rdev->mc.aper_size;
1935}
1936
1937void r100_vram_info(struct radeon_device *rdev)
1938{
1939 r100_vram_get_type(rdev);
1940
1941 r100_vram_init_sizes(rdev);
1942}
1943
1944
1945/*
1946 * Indirect registers accessor
1947 */
1948void r100_pll_errata_after_index(struct radeon_device *rdev)
1949{
1950 if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
1951 return;
1952 }
1953 (void)RREG32(RADEON_CLOCK_CNTL_DATA);
1954 (void)RREG32(RADEON_CRTC_GEN_CNTL);
1955}
1956
1957static void r100_pll_errata_after_data(struct radeon_device *rdev)
1958{
 1959	/* This workaround is necessary on RV100, RS100 and RS200 chips
1960 * or the chip could hang on a subsequent access
1961 */
1962 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
1963 udelay(5000);
1964 }
1965
 1966	/* This function is required to work around a hardware bug in some (all?)
1967 * revisions of the R300. This workaround should be called after every
1968 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
1969 * may not be correct.
1970 */
1971 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
1972 uint32_t save, tmp;
1973
1974 save = RREG32(RADEON_CLOCK_CNTL_INDEX);
1975 tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
1976 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
1977 tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
1978 WREG32(RADEON_CLOCK_CNTL_INDEX, save);
1979 }
1980}
1981
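/* PLL registers are reached indirectly: the 6-bit register index goes
 * into CLOCK_CNTL_INDEX (with PLL_WR_EN set for writes) and the data
 * moves through CLOCK_CNTL_DATA, with the errata workarounds above
 * applied around each step.
 */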
1982uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
1983{
1984 uint32_t data;
1985
1986 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
1987 r100_pll_errata_after_index(rdev);
1988 data = RREG32(RADEON_CLOCK_CNTL_DATA);
1989 r100_pll_errata_after_data(rdev);
1990 return data;
1991}
1992
1993void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1994{
1995 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
1996 r100_pll_errata_after_index(rdev);
1997 WREG32(RADEON_CLOCK_CNTL_DATA, v);
1998 r100_pll_errata_after_data(rdev);
1999}
2000
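/* Pick the safe-register bitmap used by the command-stream checker:
 * RN50 gets the most restrictive table, pre-R200 parts the generic
 * r100 table, and anything newer is handed off to r200_init().
 */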
2001int r100_init(struct radeon_device *rdev)
2002{
2003 if (ASIC_IS_RN50(rdev)) {
2004 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
2005 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
2006 } else if (rdev->family < CHIP_R200) {
2007 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
2008 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
2009 } else {
2010 return r200_init(rdev);
2011 }
2012 return 0;
2013}
2014
2015/*
2016 * Debugfs info
2017 */
2018#if defined(CONFIG_DEBUG_FS)
2019static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
2020{
2021 struct drm_info_node *node = (struct drm_info_node *) m->private;
2022 struct drm_device *dev = node->minor->dev;
2023 struct radeon_device *rdev = dev->dev_private;
2024 uint32_t reg, value;
2025 unsigned i;
2026
2027 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
2028 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
2029 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2030 for (i = 0; i < 64; i++) {
2031 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
2032 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
2033 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
2034 value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
2035 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
2036 }
2037 return 0;
2038}
2039
2040static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2041{
2042 struct drm_info_node *node = (struct drm_info_node *) m->private;
2043 struct drm_device *dev = node->minor->dev;
2044 struct radeon_device *rdev = dev->dev_private;
2045 uint32_t rdp, wdp;
2046 unsigned count, i, j;
2047
2048 radeon_ring_free_size(rdev);
2049 rdp = RREG32(RADEON_CP_RB_RPTR);
2050 wdp = RREG32(RADEON_CP_RB_WPTR);
2051 count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
2052 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2053 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
2054 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2055 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
2056 seq_printf(m, "%u dwords in ring\n", count);
2057 for (j = 0; j <= count; j++) {
2058 i = (rdp + j) & rdev->cp.ptr_mask;
2059 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
2060 }
2061 return 0;
2062}
2063
2064
2065static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
2066{
2067 struct drm_info_node *node = (struct drm_info_node *) m->private;
2068 struct drm_device *dev = node->minor->dev;
2069 struct radeon_device *rdev = dev->dev_private;
2070 uint32_t csq_stat, csq2_stat, tmp;
2071 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
2072 unsigned i;
2073
2074 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2075 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
2076 csq_stat = RREG32(RADEON_CP_CSQ_STAT);
2077 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
2078 r_rptr = (csq_stat >> 0) & 0x3ff;
2079 r_wptr = (csq_stat >> 10) & 0x3ff;
2080 ib1_rptr = (csq_stat >> 20) & 0x3ff;
2081 ib1_wptr = (csq2_stat >> 0) & 0x3ff;
2082 ib2_rptr = (csq2_stat >> 10) & 0x3ff;
2083 ib2_wptr = (csq2_stat >> 20) & 0x3ff;
2084 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
2085 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
2086 seq_printf(m, "Ring rptr %u\n", r_rptr);
2087 seq_printf(m, "Ring wptr %u\n", r_wptr);
2088 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
2089 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
2090 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
2091 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
 2092	/* FIXME: the 0, 128 and 640 offsets depend on the fifo setup; see cp_init_kms:
 2093	 * 128 = indirect1_start * 8 and 640 = indirect2_start * 8 */
2094 seq_printf(m, "Ring fifo:\n");
2095 for (i = 0; i < 256; i++) {
2096 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2097 tmp = RREG32(RADEON_CP_CSQ_DATA);
2098 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
2099 }
2100 seq_printf(m, "Indirect1 fifo:\n");
2101 for (i = 256; i <= 512; i++) {
2102 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2103 tmp = RREG32(RADEON_CP_CSQ_DATA);
2104 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
2105 }
2106 seq_printf(m, "Indirect2 fifo:\n");
 2107	for (i = 640; i < ib2_wptr; i++) { /* was ib1_wptr, which tracks the indirect1 fifo */
2108 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2109 tmp = RREG32(RADEON_CP_CSQ_DATA);
2110 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
2111 }
2112 return 0;
2113}
2114
2115static int r100_debugfs_mc_info(struct seq_file *m, void *data)
2116{
2117 struct drm_info_node *node = (struct drm_info_node *) m->private;
2118 struct drm_device *dev = node->minor->dev;
2119 struct radeon_device *rdev = dev->dev_private;
2120 uint32_t tmp;
2121
2122 tmp = RREG32(RADEON_CONFIG_MEMSIZE);
2123 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
2124 tmp = RREG32(RADEON_MC_FB_LOCATION);
2125 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
2126 tmp = RREG32(RADEON_BUS_CNTL);
2127 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
2128 tmp = RREG32(RADEON_MC_AGP_LOCATION);
2129 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
2130 tmp = RREG32(RADEON_AGP_BASE);
2131 seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
2132 tmp = RREG32(RADEON_HOST_PATH_CNTL);
2133 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
2134 tmp = RREG32(0x01D0);
2135 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
2136 tmp = RREG32(RADEON_AIC_LO_ADDR);
2137 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
2138 tmp = RREG32(RADEON_AIC_HI_ADDR);
2139 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
2140 tmp = RREG32(0x01E4);
2141 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
2142 return 0;
2143}
2144
2145static struct drm_info_list r100_debugfs_rbbm_list[] = {
2146 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
2147};
2148
2149static struct drm_info_list r100_debugfs_cp_list[] = {
2150 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
2151 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
2152};
2153
2154static struct drm_info_list r100_debugfs_mc_info_list[] = {
2155 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
2156};
2157#endif
2158
2159int r100_debugfs_rbbm_init(struct radeon_device *rdev)
2160{
2161#if defined(CONFIG_DEBUG_FS)
2162 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
2163#else
2164 return 0;
2165#endif
2166}
2167
2168int r100_debugfs_cp_init(struct radeon_device *rdev)
2169{
2170#if defined(CONFIG_DEBUG_FS)
2171 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
2172#else
2173 return 0;
2174#endif
2175}
2176
2177int r100_debugfs_mc_info_init(struct radeon_device *rdev)
2178{
2179#if defined(CONFIG_DEBUG_FS)
2180 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
2181#else
2182 return 0;
2183#endif
2184}
2185
2186int r100_set_surface_reg(struct radeon_device *rdev, int reg,
2187 uint32_t tiling_flags, uint32_t pitch,
2188 uint32_t offset, uint32_t obj_size)
2189{
2190 int surf_index = reg * 16;
2191 int flags = 0;
2192
2193 /* r100/r200 divide by 16 */
2194 if (rdev->family < CHIP_R300)
2195 flags = pitch / 16;
2196 else
2197 flags = pitch / 8;
2198
2199 if (rdev->family <= CHIP_RS200) {
2200 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2201 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2202 flags |= RADEON_SURF_TILE_COLOR_BOTH;
2203 if (tiling_flags & RADEON_TILING_MACRO)
2204 flags |= RADEON_SURF_TILE_COLOR_MACRO;
2205 } else if (rdev->family <= CHIP_RV280) {
2206 if (tiling_flags & (RADEON_TILING_MACRO))
2207 flags |= R200_SURF_TILE_COLOR_MACRO;
2208 if (tiling_flags & RADEON_TILING_MICRO)
2209 flags |= R200_SURF_TILE_COLOR_MICRO;
2210 } else {
2211 if (tiling_flags & RADEON_TILING_MACRO)
2212 flags |= R300_SURF_TILE_MACRO;
2213 if (tiling_flags & RADEON_TILING_MICRO)
2214 flags |= R300_SURF_TILE_MICRO;
2215 }
2216
2217 DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
2218 WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
2219 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
2220 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
2221 return 0;
2222}
2223
2224void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
2225{
2226 int surf_index = reg * 16;
2227 WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
2228}
2229
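/* Derive display watermarks from memory timings and clocks, in 20.12
 * fixed point: compare peak display bandwidth against memory
 * bandwidth, estimate the memory-controller and cursor latencies from
 * tRCD/tRP/tRAS/tCAS, then program the stop-request and
 * critical-point fields of GRPH_BUFFER_CNTL for each enabled CRTC.
 */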
2230void r100_bandwidth_update(struct radeon_device *rdev)
2231{
2232 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
2233 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
2234 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
2235 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
2236 fixed20_12 memtcas_ff[8] = {
2237 fixed_init(1),
2238 fixed_init(2),
2239 fixed_init(3),
2240 fixed_init(0),
2241 fixed_init_half(1),
2242 fixed_init_half(2),
2243 fixed_init(0),
2244 };
2245 fixed20_12 memtcas_rs480_ff[8] = {
2246 fixed_init(0),
2247 fixed_init(1),
2248 fixed_init(2),
2249 fixed_init(3),
2250 fixed_init(0),
2251 fixed_init_half(1),
2252 fixed_init_half(2),
2253 fixed_init_half(3),
2254 };
2255 fixed20_12 memtcas2_ff[8] = {
2256 fixed_init(0),
2257 fixed_init(1),
2258 fixed_init(2),
2259 fixed_init(3),
2260 fixed_init(4),
2261 fixed_init(5),
2262 fixed_init(6),
2263 fixed_init(7),
2264 };
2265 fixed20_12 memtrbs[8] = {
2266 fixed_init(1),
2267 fixed_init_half(1),
2268 fixed_init(2),
2269 fixed_init_half(2),
2270 fixed_init(3),
2271 fixed_init_half(3),
2272 fixed_init(4),
2273 fixed_init_half(4)
2274 };
2275 fixed20_12 memtrbs_r4xx[8] = {
2276 fixed_init(4),
2277 fixed_init(5),
2278 fixed_init(6),
2279 fixed_init(7),
2280 fixed_init(8),
2281 fixed_init(9),
2282 fixed_init(10),
2283 fixed_init(11)
2284 };
2285 fixed20_12 min_mem_eff;
2286 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
2287 fixed20_12 cur_latency_mclk, cur_latency_sclk;
2288 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
2289 disp_drain_rate2, read_return_rate;
2290 fixed20_12 time_disp1_drop_priority;
2291 int c;
2292 int cur_size = 16; /* in octawords */
2293 int critical_point = 0, critical_point2;
2294/* uint32_t read_return_rate, time_disp1_drop_priority; */
2295 int stop_req, max_stop_req;
2296 struct drm_display_mode *mode1 = NULL;
2297 struct drm_display_mode *mode2 = NULL;
2298 uint32_t pixel_bytes1 = 0;
2299 uint32_t pixel_bytes2 = 0;
2300
2301 if (rdev->mode_info.crtcs[0]->base.enabled) {
2302 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
2303 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
2304 }
2305 if (rdev->mode_info.crtcs[1]->base.enabled) {
2306 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
2307 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
2308 }
2309
2310 min_mem_eff.full = rfixed_const_8(0);
2311 /* get modes */
2312 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
2313 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
2314 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
2315 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
2316 /* check crtc enables */
2317 if (mode2)
2318 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
2319 if (mode1)
2320 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
2321 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
2322 }
2323
2324 /*
 2325	 * determine if there is enough bandwidth for the current mode
2326 */
2327 mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
2328 temp_ff.full = rfixed_const(100);
2329 mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
2330 sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
2331 sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
2332
2333 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
2334 temp_ff.full = rfixed_const(temp);
2335 mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
2336
2337 pix_clk.full = 0;
2338 pix_clk2.full = 0;
2339 peak_disp_bw.full = 0;
2340 if (mode1) {
2341 temp_ff.full = rfixed_const(1000);
2342 pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
2343 pix_clk.full = rfixed_div(pix_clk, temp_ff);
2344 temp_ff.full = rfixed_const(pixel_bytes1);
2345 peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
2346 }
2347 if (mode2) {
2348 temp_ff.full = rfixed_const(1000);
2349 pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
2350 pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
2351 temp_ff.full = rfixed_const(pixel_bytes2);
2352 peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
2353 }
2354
2355 mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
2356 if (peak_disp_bw.full >= mem_bw.full) {
 2357	DRM_ERROR("You may not have enough display bandwidth for the current mode\n"
 2358	 "If you have a flickering problem, try lowering the resolution, refresh rate, or color depth\n");
2359 }
2360
 2361	/* Get memory timings from the MEM_TIMING_CNTL register, converting its contents. */
2362 temp = RREG32(RADEON_MEM_TIMING_CNTL);
2363 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
2364 mem_trcd = ((temp >> 2) & 0x3) + 1;
2365 mem_trp = ((temp & 0x3)) + 1;
2366 mem_tras = ((temp & 0x70) >> 4) + 1;
2367 } else if (rdev->family == CHIP_R300 ||
2368 rdev->family == CHIP_R350) { /* r300, r350 */
2369 mem_trcd = (temp & 0x7) + 1;
2370 mem_trp = ((temp >> 8) & 0x7) + 1;
2371 mem_tras = ((temp >> 11) & 0xf) + 4;
2372 } else if (rdev->family == CHIP_RV350 ||
2373 rdev->family <= CHIP_RV380) {
2374 /* rv3x0 */
2375 mem_trcd = (temp & 0x7) + 3;
2376 mem_trp = ((temp >> 8) & 0x7) + 3;
2377 mem_tras = ((temp >> 11) & 0xf) + 6;
2378 } else if (rdev->family == CHIP_R420 ||
2379 rdev->family == CHIP_R423 ||
2380 rdev->family == CHIP_RV410) {
2381 /* r4xx */
2382 mem_trcd = (temp & 0xf) + 3;
2383 if (mem_trcd > 15)
2384 mem_trcd = 15;
2385 mem_trp = ((temp >> 8) & 0xf) + 3;
2386 if (mem_trp > 15)
2387 mem_trp = 15;
2388 mem_tras = ((temp >> 12) & 0x1f) + 6;
2389 if (mem_tras > 31)
2390 mem_tras = 31;
2391 } else { /* RV200, R200 */
2392 mem_trcd = (temp & 0x7) + 1;
2393 mem_trp = ((temp >> 8) & 0x7) + 1;
2394 mem_tras = ((temp >> 12) & 0xf) + 4;
2395 }
2396 /* convert to FF */
2397 trcd_ff.full = rfixed_const(mem_trcd);
2398 trp_ff.full = rfixed_const(mem_trp);
2399 tras_ff.full = rfixed_const(mem_tras);
2400
 2401	/* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
2402 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
2403 data = (temp & (7 << 20)) >> 20;
2404 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
2405 if (rdev->family == CHIP_RS480) /* don't think rs400 */
2406 tcas_ff = memtcas_rs480_ff[data];
2407 else
2408 tcas_ff = memtcas_ff[data];
2409 } else
2410 tcas_ff = memtcas2_ff[data];
2411
2412 if (rdev->family == CHIP_RS400 ||
2413 rdev->family == CHIP_RS480) {
 2414	/* extra CAS latency stored in bits 23-25, 0-4 clocks */
2415 data = (temp >> 23) & 0x7;
2416 if (data < 5)
2417 tcas_ff.full += rfixed_const(data);
2418 }
2419
2420 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
2421 /* on the R300, Tcas is included in Trbs.
2422 */
2423 temp = RREG32(RADEON_MEM_CNTL);
2424 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
2425 if (data == 1) {
2426 if (R300_MEM_USE_CD_CH_ONLY & temp) {
2427 temp = RREG32(R300_MC_IND_INDEX);
2428 temp &= ~R300_MC_IND_ADDR_MASK;
2429 temp |= R300_MC_READ_CNTL_CD_mcind;
2430 WREG32(R300_MC_IND_INDEX, temp);
2431 temp = RREG32(R300_MC_IND_DATA);
2432 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
2433 } else {
2434 temp = RREG32(R300_MC_READ_CNTL_AB);
2435 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2436 }
2437 } else {
2438 temp = RREG32(R300_MC_READ_CNTL_AB);
2439 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2440 }
2441 if (rdev->family == CHIP_RV410 ||
2442 rdev->family == CHIP_R420 ||
2443 rdev->family == CHIP_R423)
2444 trbs_ff = memtrbs_r4xx[data];
2445 else
2446 trbs_ff = memtrbs[data];
2447 tcas_ff.full += trbs_ff.full;
2448 }
2449
2450 sclk_eff_ff.full = sclk_ff.full;
2451
2452 if (rdev->flags & RADEON_IS_AGP) {
2453 fixed20_12 agpmode_ff;
2454 agpmode_ff.full = rfixed_const(radeon_agpmode);
2455 temp_ff.full = rfixed_const_666(16);
2456 sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
2457 }
2458 /* TODO PCIE lanes may affect this - agpmode == 16?? */
2459
2460 if (ASIC_IS_R300(rdev)) {
2461 sclk_delay_ff.full = rfixed_const(250);
2462 } else {
2463 if ((rdev->family == CHIP_RV100) ||
2464 rdev->flags & RADEON_IS_IGP) {
2465 if (rdev->mc.vram_is_ddr)
2466 sclk_delay_ff.full = rfixed_const(41);
2467 else
2468 sclk_delay_ff.full = rfixed_const(33);
2469 } else {
2470 if (rdev->mc.vram_width == 128)
2471 sclk_delay_ff.full = rfixed_const(57);
2472 else
2473 sclk_delay_ff.full = rfixed_const(41);
2474 }
2475 }
2476
2477 mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
2478
2479 if (rdev->mc.vram_is_ddr) {
2480 if (rdev->mc.vram_width == 32) {
2481 k1.full = rfixed_const(40);
2482 c = 3;
2483 } else {
2484 k1.full = rfixed_const(20);
2485 c = 1;
2486 }
2487 } else {
2488 k1.full = rfixed_const(40);
2489 c = 3;
2490 }
2491
2492 temp_ff.full = rfixed_const(2);
2493 mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
2494 temp_ff.full = rfixed_const(c);
2495 mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
2496 temp_ff.full = rfixed_const(4);
2497 mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
2498 mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
2499 mc_latency_mclk.full += k1.full;
2500
2501 mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
2502 mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
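/* Net effect of the steps above:
 *   mc_latency_mclk = (2*tRCD + c*tCAS + 4*(tRAS + tRP) + k1) / mclk
 *                     + 4 / sclk_eff
 * with k1 = 20, c = 1 for wide DDR and k1 = 40, c = 3 otherwise.
 */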
2503
2504 /*
2505 HW cursor time assuming worst case of full size colour cursor.
2506 */
2507 temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
2508 temp_ff.full += trcd_ff.full;
2509 if (temp_ff.full < tras_ff.full)
2510 temp_ff.full = tras_ff.full;
2511 cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
2512
2513 temp_ff.full = rfixed_const(cur_size);
2514 cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
2515 /*
2516 Find the total latency for the display data.
2517 */
2518 disp_latency_overhead.full = rfixed_const(80);
2519 disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
2520 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
2521 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
2522
2523 if (mc_latency_mclk.full > mc_latency_sclk.full)
2524 disp_latency.full = mc_latency_mclk.full;
2525 else
2526 disp_latency.full = mc_latency_sclk.full;
2527
2528 /* setup Max GRPH_STOP_REQ default value */
2529 if (ASIC_IS_RV100(rdev))
2530 max_stop_req = 0x5c;
2531 else
2532 max_stop_req = 0x7c;
2533
2534 if (mode1) {
2535 /* CRTC1
2536 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
2537 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
2538 */
2539 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
2540
2541 if (stop_req > max_stop_req)
2542 stop_req = max_stop_req;
2543
2544 /*
2545 Find the drain rate of the display buffer.
2546 */
2547 temp_ff.full = rfixed_const((16/pixel_bytes1));
2548 disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
2549
2550 /*
2551 Find the critical point of the display buffer.
2552 */
2553 crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
2554 crit_point_ff.full += rfixed_const_half(0);
2555
2556 critical_point = rfixed_trunc(crit_point_ff);
2557
2558 if (rdev->disp_priority == 2) {
2559 critical_point = 0;
2560 }
2561
2562 /*
2563 The critical point should never be above max_stop_req-4. Setting
2564 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
2565 */
2566 if (max_stop_req - critical_point < 4)
2567 critical_point = 0;
2568
2569 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
 2570	/* some R300 cards have a problem with this set to 0 when CRTC2 is enabled. */
2571 critical_point = 0x10;
2572 }
2573
2574 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
2575 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
2576 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
2577 temp &= ~(RADEON_GRPH_START_REQ_MASK);
2578 if ((rdev->family == CHIP_R350) &&
2579 (stop_req > 0x15)) {
2580 stop_req -= 0x10;
2581 }
2582 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
2583 temp |= RADEON_GRPH_BUFFER_SIZE;
2584 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
2585 RADEON_GRPH_CRITICAL_AT_SOF |
2586 RADEON_GRPH_STOP_CNTL);
2587 /*
2588 Write the result into the register.
2589 */
2590 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
2591 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
2592
2593#if 0
2594 if ((rdev->family == CHIP_RS400) ||
2595 (rdev->family == CHIP_RS480)) {
2596 /* attempt to program RS400 disp regs correctly ??? */
2597 temp = RREG32(RS400_DISP1_REG_CNTL);
2598 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
2599 RS400_DISP1_STOP_REQ_LEVEL_MASK);
2600 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
2601 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
2602 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
2603 temp = RREG32(RS400_DMIF_MEM_CNTL1);
2604 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
2605 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
2606 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
2607 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
2608 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
2609 }
2610#endif
2611
 2612	DRM_DEBUG("GRPH_BUFFER_CNTL now %x\n",
2613 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
2614 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
2615 }
2616
2617 if (mode2) {
2618 u32 grph2_cntl;
2619 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
2620
2621 if (stop_req > max_stop_req)
2622 stop_req = max_stop_req;
2623
2624 /*
2625 Find the drain rate of the display buffer.
2626 */
2627 temp_ff.full = rfixed_const((16/pixel_bytes2));
2628 disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
2629
2630 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
2631 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
2632 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
2633 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
2634 if ((rdev->family == CHIP_R350) &&
2635 (stop_req > 0x15)) {
2636 stop_req -= 0x10;
2637 }
2638 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
2639 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
2640 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
2641 RADEON_GRPH_CRITICAL_AT_SOF |
2642 RADEON_GRPH_STOP_CNTL);
2643
2644 if ((rdev->family == CHIP_RS100) ||
2645 (rdev->family == CHIP_RS200))
2646 critical_point2 = 0;
2647 else {
 2648	temp = (rdev->mc.vram_width * (rdev->mc.vram_is_ddr + 1)) / 128; /* parenthesized; the unparenthesized form truncated to 0 for SDR */
2649 temp_ff.full = rfixed_const(temp);
2650 temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
2651 if (sclk_ff.full < temp_ff.full)
2652 temp_ff.full = sclk_ff.full;
2653
2654 read_return_rate.full = temp_ff.full;
2655
2656 if (mode1) {
2657 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
2658 time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
2659 } else {
2660 time_disp1_drop_priority.full = 0;
2661 }
2662 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
2663 crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
2664 crit_point_ff.full += rfixed_const_half(0);
2665
2666 critical_point2 = rfixed_trunc(crit_point_ff);
2667
2668 if (rdev->disp_priority == 2) {
2669 critical_point2 = 0;
2670 }
2671
2672 if (max_stop_req - critical_point2 < 4)
2673 critical_point2 = 0;
2674
2675 }
2676
2677 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
 2678	/* some R300 cards have a problem with this set to 0 */
2679 critical_point2 = 0x10;
2680 }
2681
2682 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
2683 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
2684
2685 if ((rdev->family == CHIP_RS400) ||
2686 (rdev->family == CHIP_RS480)) {
2687#if 0
2688 /* attempt to program RS400 disp2 regs correctly ??? */
2689 temp = RREG32(RS400_DISP2_REQ_CNTL1);
2690 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
2691 RS400_DISP2_STOP_REQ_LEVEL_MASK);
2692 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
2693 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
2694 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
2695 temp = RREG32(RS400_DISP2_REQ_CNTL2);
2696 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
2697 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
2698 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
2699 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
2700 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
2701#endif
2702 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
2703 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
2704 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
2705 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
2706 }
2707
 2708	DRM_DEBUG("GRPH2_BUFFER_CNTL now %x\n",
2709 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
2710 }
2711}
2712
2713static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
2714{
2715 DRM_ERROR("pitch %d\n", t->pitch);
2716 DRM_ERROR("width %d\n", t->width);
2717 DRM_ERROR("height %d\n", t->height);
2718 DRM_ERROR("num levels %d\n", t->num_levels);
2719 DRM_ERROR("depth %d\n", t->txdepth);
2720 DRM_ERROR("bpp %d\n", t->cpp);
2721 DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
2722 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
2723 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
2724}
2725
2726static int r100_cs_track_cube(struct radeon_device *rdev,
2727 struct r100_cs_track *track, unsigned idx)
2728{
2729 unsigned face, w, h;
2730 struct radeon_object *cube_robj;
2731 unsigned long size;
2732
2733 for (face = 0; face < 5; face++) {
2734 cube_robj = track->textures[idx].cube_info[face].robj;
2735 w = track->textures[idx].cube_info[face].width;
2736 h = track->textures[idx].cube_info[face].height;
2737
2738 size = w * h;
2739 size *= track->textures[idx].cpp;
2740
2741 size += track->textures[idx].cube_info[face].offset;
2742
2743 if (size > radeon_object_size(cube_robj)) {
2744 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
2745 size, radeon_object_size(cube_robj));
2746 r100_cs_track_texture_print(&track->textures[idx]);
2747 return -1;
2748 }
2749 }
2750 return 0;
2751}
2752
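/* Walk every enabled texture unit and accumulate the mip chain
 * footprint (sum of w*h per level, times bytes per texel), scaled for
 * 3D textures (by depth) or cube maps (by 6, or via the per-face
 * offsets when the faces live in separate objects), then check it
 * against the size of the backing object.
 */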
2753static int r100_cs_track_texture_check(struct radeon_device *rdev,
2754 struct r100_cs_track *track)
2755{
2756 struct radeon_object *robj;
2757 unsigned long size;
2758 unsigned u, i, w, h;
2759 int ret;
2760
2761 for (u = 0; u < track->num_texture; u++) {
2762 if (!track->textures[u].enabled)
2763 continue;
2764 robj = track->textures[u].robj;
2765 if (robj == NULL) {
2766 DRM_ERROR("No texture bound to unit %u\n", u);
2767 return -EINVAL;
2768 }
2769 size = 0;
2770 for (i = 0; i <= track->textures[u].num_levels; i++) {
2771 if (track->textures[u].use_pitch) {
2772 if (rdev->family < CHIP_R300)
2773 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
2774 else
2775 w = track->textures[u].pitch / (1 << i);
2776 } else {
2777 w = track->textures[u].width / (1 << i);
2778 if (rdev->family >= CHIP_RV515)
2779 w |= track->textures[u].width_11;
2780 if (track->textures[u].roundup_w)
2781 w = roundup_pow_of_two(w);
2782 }
2783 h = track->textures[u].height / (1 << i);
2784 if (rdev->family >= CHIP_RV515)
2785 h |= track->textures[u].height_11;
2786 if (track->textures[u].roundup_h)
2787 h = roundup_pow_of_two(h);
2788 size += w * h;
2789 }
2790 size *= track->textures[u].cpp;
2791 switch (track->textures[u].tex_coord_type) {
2792 case 0:
2793 break;
2794 case 1:
2795 size *= (1 << track->textures[u].txdepth);
2796 break;
2797 case 2:
2798 if (track->separate_cube) {
2799 ret = r100_cs_track_cube(rdev, track, u);
2800 if (ret)
2801 return ret;
2802 } else
2803 size *= 6;
2804 break;
2805 default:
2806 DRM_ERROR("Invalid texture coordinate type %u for unit "
2807 "%u\n", track->textures[u].tex_coord_type, u);
2808 return -EINVAL;
2809 }
2810 if (size > radeon_object_size(robj)) {
2811 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
2812 "%lu\n", u, size, radeon_object_size(robj));
2813 r100_cs_track_texture_print(&track->textures[u]);
2814 return -EINVAL;
2815 }
2816 }
2817 return 0;
2818}
2819
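/* VAP_VF_CNTL bits 5:4 select how vertices are fetched; as this
 * checker interprets them: 1 = indexed arrays (sized by the largest
 * index seen), 2 = vertex lists (sized by the vertex count), 3 =
 * immediate-mode vertices embedded in the command stream.
 */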
2820int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2821{
2822 unsigned i;
2823 unsigned long size;
2824 unsigned prim_walk;
2825 unsigned nverts;
2826
2827 for (i = 0; i < track->num_cb; i++) {
2828 if (track->cb[i].robj == NULL) {
2829 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
2830 return -EINVAL;
2831 }
2832 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
2833 size += track->cb[i].offset;
2834 if (size > radeon_object_size(track->cb[i].robj)) {
2835 DRM_ERROR("[drm] Buffer too small for color buffer %d "
2836 "(need %lu have %lu) !\n", i, size,
2837 radeon_object_size(track->cb[i].robj));
2838 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
2839 i, track->cb[i].pitch, track->cb[i].cpp,
2840 track->cb[i].offset, track->maxy);
2841 return -EINVAL;
2842 }
2843 }
2844 if (track->z_enabled) {
2845 if (track->zb.robj == NULL) {
2846 DRM_ERROR("[drm] No buffer for z buffer !\n");
2847 return -EINVAL;
2848 }
2849 size = track->zb.pitch * track->zb.cpp * track->maxy;
2850 size += track->zb.offset;
2851 if (size > radeon_object_size(track->zb.robj)) {
2852 DRM_ERROR("[drm] Buffer too small for z buffer "
2853 "(need %lu have %lu) !\n", size,
2854 radeon_object_size(track->zb.robj));
2855 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
2856 track->zb.pitch, track->zb.cpp,
2857 track->zb.offset, track->maxy);
2858 return -EINVAL;
2859 }
2860 }
2861 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
2862 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
2863 switch (prim_walk) {
2864 case 1:
2865 for (i = 0; i < track->num_arrays; i++) {
2866 size = track->arrays[i].esize * track->max_indx * 4;
2867 if (track->arrays[i].robj == NULL) {
2868 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2869 "bound\n", prim_walk, i);
2870 return -EINVAL;
2871 }
2872 if (size > radeon_object_size(track->arrays[i].robj)) {
2873 DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
2874 "have %lu dwords\n", prim_walk, i,
2875 size >> 2,
2876 radeon_object_size(track->arrays[i].robj) >> 2);
2877 DRM_ERROR("Max indices %u\n", track->max_indx);
2878 return -EINVAL;
2879 }
2880 }
2881 break;
2882 case 2:
2883 for (i = 0; i < track->num_arrays; i++) {
2884 size = track->arrays[i].esize * (nverts - 1) * 4;
2885 if (track->arrays[i].robj == NULL) {
2886 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2887 "bound\n", prim_walk, i);
2888 return -EINVAL;
2889 }
2890 if (size > radeon_object_size(track->arrays[i].robj)) {
2891 DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
2892 "have %lu dwords\n", prim_walk, i, size >> 2,
2893 radeon_object_size(track->arrays[i].robj) >> 2);
2894 return -EINVAL;
2895 }
2896 }
2897 break;
2898 case 3:
2899 size = track->vtx_size * nverts;
2900 if (size != track->immd_dwords) {
 2901	DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
2902 track->immd_dwords, size);
2903 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
2904 nverts, track->vtx_size);
2905 return -EINVAL;
2906 }
2907 break;
2908 default:
2909 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
2910 prim_walk);
2911 return -EINVAL;
2912 }
2913 return r100_cs_track_texture_check(rdev, track);
2914}
2915
2916void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
2917{
2918 unsigned i, face;
2919
2920 if (rdev->family < CHIP_R300) {
2921 track->num_cb = 1;
2922 if (rdev->family <= CHIP_RS200)
2923 track->num_texture = 3;
2924 else
2925 track->num_texture = 6;
2926 track->maxy = 2048;
2927 track->separate_cube = 1;
2928 } else {
2929 track->num_cb = 4;
2930 track->num_texture = 16;
2931 track->maxy = 4096;
2932 track->separate_cube = 0;
2933 }
2934
2935 for (i = 0; i < track->num_cb; i++) {
2936 track->cb[i].robj = NULL;
2937 track->cb[i].pitch = 8192;
2938 track->cb[i].cpp = 16;
2939 track->cb[i].offset = 0;
2940 }
2941 track->z_enabled = true;
2942 track->zb.robj = NULL;
2943 track->zb.pitch = 8192;
2944 track->zb.cpp = 4;
2945 track->zb.offset = 0;
2946 track->vtx_size = 0x7F;
2947 track->immd_dwords = 0xFFFFFFFFUL;
2948 track->num_arrays = 11;
2949 track->max_indx = 0x00FFFFFFUL;
2950 for (i = 0; i < track->num_arrays; i++) {
2951 track->arrays[i].robj = NULL;
2952 track->arrays[i].esize = 0x7F;
2953 }
2954 for (i = 0; i < track->num_texture; i++) {
2955 track->textures[i].pitch = 16536;
2956 track->textures[i].width = 16536;
2957 track->textures[i].height = 16536;
2958 track->textures[i].width_11 = 1 << 11;
2959 track->textures[i].height_11 = 1 << 11;
2960 track->textures[i].num_levels = 12;
2961 if (rdev->family <= CHIP_RS200) {
2962 track->textures[i].tex_coord_type = 0;
2963 track->textures[i].txdepth = 0;
2964 } else {
2965 track->textures[i].txdepth = 16;
2966 track->textures[i].tex_coord_type = 1;
2967 }
2968 track->textures[i].cpp = 64;
2969 track->textures[i].robj = NULL;
 2970	/* CS IB emission code makes sure texture units are disabled */
2971 track->textures[i].enabled = false;
2972 track->textures[i].roundup_w = true;
2973 track->textures[i].roundup_h = true;
2974 if (track->separate_cube)
2975 for (face = 0; face < 5; face++) {
2976 track->textures[i].cube_info[face].robj = NULL;
2977 track->textures[i].cube_info[face].width = 16536;
2978 track->textures[i].cube_info[face].height = 16536;
2979 track->textures[i].cube_info[face].offset = 0;
2980 }
2981 }
2982}
2983
2984int r100_ring_test(struct radeon_device *rdev)
2985{
2986 uint32_t scratch;
2987 uint32_t tmp = 0;
2988 unsigned i;
2989 int r;
2990
2991 r = radeon_scratch_get(rdev, &scratch);
2992 if (r) {
2993 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2994 return r;
2995 }
2996 WREG32(scratch, 0xCAFEDEAD);
2997 r = radeon_ring_lock(rdev, 2);
2998 if (r) {
2999 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3000 radeon_scratch_free(rdev, scratch);
3001 return r;
3002 }
3003 radeon_ring_write(rdev, PACKET0(scratch, 0));
3004 radeon_ring_write(rdev, 0xDEADBEEF);
3005 radeon_ring_unlock_commit(rdev);
3006 for (i = 0; i < rdev->usec_timeout; i++) {
3007 tmp = RREG32(scratch);
3008 if (tmp == 0xDEADBEEF) {
3009 break;
3010 }
3011 DRM_UDELAY(1);
3012 }
3013 if (i < rdev->usec_timeout) {
3014 DRM_INFO("ring test succeeded in %d usecs\n", i);
3015 } else {
 3016	DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
3017 scratch, tmp);
3018 r = -EINVAL;
3019 }
3020 radeon_scratch_free(rdev, scratch);
3021 return r;
3022}
3023
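/* A PACKET0 header with a count of 1 writes two consecutive
 * registers, so this single packet loads CP_IB_BASE and the IB size
 * register that follows it with the buffer's GPU address and dword
 * count.
 */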
3024void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3025{
3026 radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
3027 radeon_ring_write(rdev, ib->gpu_addr);
3028 radeon_ring_write(rdev, ib->length_dw);
3029}
3030
3031int r100_ib_test(struct radeon_device *rdev)
3032{
3033 struct radeon_ib *ib;
3034 uint32_t scratch;
3035 uint32_t tmp = 0;
3036 unsigned i;
3037 int r;
3038
3039 r = radeon_scratch_get(rdev, &scratch);
3040 if (r) {
3041 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3042 return r;
3043 }
3044 WREG32(scratch, 0xCAFEDEAD);
3045 r = radeon_ib_get(rdev, &ib);
3046 if (r) {
3047 return r;
3048 }
3049 ib->ptr[0] = PACKET0(scratch, 0);
3050 ib->ptr[1] = 0xDEADBEEF;
3051 ib->ptr[2] = PACKET2(0);
3052 ib->ptr[3] = PACKET2(0);
3053 ib->ptr[4] = PACKET2(0);
3054 ib->ptr[5] = PACKET2(0);
3055 ib->ptr[6] = PACKET2(0);
3056 ib->ptr[7] = PACKET2(0);
3057 ib->length_dw = 8;
3058 r = radeon_ib_schedule(rdev, ib);
3059 if (r) {
3060 radeon_scratch_free(rdev, scratch);
3061 radeon_ib_free(rdev, &ib);
3062 return r;
3063 }
3064 r = radeon_fence_wait(ib->fence, false);
3065 if (r) {
3066 return r;
3067 }
3068 for (i = 0; i < rdev->usec_timeout; i++) {
3069 tmp = RREG32(scratch);
3070 if (tmp == 0xDEADBEEF) {
3071 break;
3072 }
3073 DRM_UDELAY(1);
3074 }
3075 if (i < rdev->usec_timeout) {
3076 DRM_INFO("ib test succeeded in %u usecs\n", i);
3077 } else {
 3078	DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3079 scratch, tmp);
3080 r = -EINVAL;
3081 }
3082 radeon_scratch_free(rdev, scratch);
3083 radeon_ib_free(rdev, &ib);
3084 return r;
3085}