/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "r100d.h"

#include <linux/firmware.h>
#include <linux/platform_device.h>

#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

/* Firmware Names */
#define FIRMWARE_R100	"radeon/R100_cp.bin"
#define FIRMWARE_R200	"radeon/R200_cp.bin"
#define FIRMWARE_R300	"radeon/R300_cp.bin"
#define FIRMWARE_R420	"radeon/R420_cp.bin"
#define FIRMWARE_RS690	"radeon/RS690_cp.bin"
#define FIRMWARE_RS600	"radeon/RS600_cp.bin"
#define FIRMWARE_R520	"radeon/R520_cp.bin"

MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
MODULE_FIRMWARE(FIRMWARE_R300);
MODULE_FIRMWARE(FIRMWARE_R420);
MODULE_FIRMWARE(FIRMWARE_RS690);
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);

#include "r100_track.h"

/* This file gathers functions specific to:
 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
 *
 * Some of these functions might be used by newer ASICs.
 */
int r200_init(struct radeon_device *rdev);
void r100_hdp_reset(struct radeon_device *rdev);
void r100_gpu_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_mc_wait_for_idle(struct radeon_device *rdev);
void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);


/*
 * PCI GART
 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
        /* TODO: can we do something here? */
        /* The hw seems to cache only one entry, so we should discard
         * that entry; otherwise the first GPU GART read that hits it
         * could end up at the wrong address. */
}

int r100_pci_gart_enable(struct radeon_device *rdev)
{
        uint32_t tmp;
        int r;

        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        if (r) {
                return r;
        }
        if (rdev->gart.table.ram.ptr == NULL) {
                rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
                r = radeon_gart_table_ram_alloc(rdev);
                if (r) {
                        return r;
                }
        }
        /* discard memory requests outside of the configured range */
        tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32(RADEON_AIC_CNTL, tmp);
        /* set address range for PCI address translation */
        WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
        tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
        WREG32(RADEON_AIC_HI_ADDR, tmp);
        /* Enable bus mastering */
        tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
        WREG32(RADEON_BUS_CNTL, tmp);
        /* set PCI GART page-table base address */
        WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
        tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
        WREG32(RADEON_AIC_CNTL, tmp);
        r100_pci_gart_tlb_flush(rdev);
        rdev->gart.ready = true;
        return 0;
}

void r100_pci_gart_disable(struct radeon_device *rdev)
{
        uint32_t tmp;

        /* discard memory requests outside of the configured range */
        tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
        WREG32(RADEON_AIC_LO_ADDR, 0);
        WREG32(RADEON_AIC_HI_ADDR, 0);
}

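/* Each GART entry is one 32-bit little-endian bus address per GPU page
 * (hence table_size = num_gpu_pages * 4 above); only the low 32 bits of
 * the page address are stored, as this GART presumably cannot address
 * memory above 4GB.
 */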
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
        if (i < 0 || i >= rdev->gart.num_gpu_pages) {
                return -EINVAL;
        }
        rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
        return 0;
}

int r100_gart_enable(struct radeon_device *rdev)
{
        if (rdev->flags & RADEON_IS_AGP) {
                r100_pci_gart_disable(rdev);
                return 0;
        }
        return r100_pci_gart_enable(rdev);
}


/*
 * MC
 */
void r100_mc_disable_clients(struct radeon_device *rdev)
{
        uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;

        /* FIXME: is this function correct for rs100,rs200,rs300 ? */
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        /* stop display and memory access */
        ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
        WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
        crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
        WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
        crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);

        r100_gpu_wait_for_vsync(rdev);

        WREG32(RADEON_CRTC_GEN_CNTL,
               (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
               RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);

        if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
                crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);

                r100_gpu_wait_for_vsync2(rdev);
                WREG32(RADEON_CRTC2_GEN_CNTL,
                       (crtc2_gen_cntl &
                        ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
                       RADEON_CRTC2_DISP_REQ_EN_B);
        }

        udelay(500);
}

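/* Programs the MC's view of VRAM and of the AGP aperture.  The
 * MC_FB_LOCATION/MC_AGP_LOCATION registers pack start and top addresses
 * in 64KB units, which is why the addresses below are shifted right by
 * 16 bits before REG_SET.
 */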
void r100_mc_setup(struct radeon_device *rdev)
{
        uint32_t tmp;
        int r;

        r = r100_debugfs_mc_info_init(rdev);
        if (r) {
                DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
        }
        /* Write VRAM size in case we are limiting it */
        WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
        /* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM,
         * if the aperture is 64MB but we have 32MB VRAM
         * we report only 32MB VRAM but we have to set MC_FB_LOCATION
         * to 64MB, otherwise the gpu accidentally dies */
        tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
        tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
        tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
        WREG32(RADEON_MC_FB_LOCATION, tmp);

        /* Enable bus mastering */
        tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
        WREG32(RADEON_BUS_CNTL, tmp);

        if (rdev->flags & RADEON_IS_AGP) {
                tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
                tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
                tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
                WREG32(RADEON_MC_AGP_LOCATION, tmp);
                WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
        } else {
                WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
                WREG32(RADEON_AGP_BASE, 0);
        }

        tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
        tmp |= (7 << 28);
        WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
        (void)RREG32(RADEON_HOST_PATH_CNTL);
        WREG32(RADEON_HOST_PATH_CNTL, tmp);
        (void)RREG32(RADEON_HOST_PATH_CNTL);
}

int r100_mc_init(struct radeon_device *rdev)
{
        int r;

        if (r100_debugfs_rbbm_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for RBBM !\n");
        }

        r100_gpu_init(rdev);
        /* Disable gart, which also disables out-of-gart access */
        r100_pci_gart_disable(rdev);

        /* Setup GPU memory space */
        rdev->mc.gtt_location = 0xFFFFFFFFUL;
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r) {
                        printk(KERN_WARNING "[drm] Disabling AGP\n");
                        rdev->flags &= ~RADEON_IS_AGP;
                        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
                } else {
                        rdev->mc.gtt_location = rdev->mc.agp_base;
                }
        }
        r = radeon_mc_setup(rdev);
        if (r) {
                return r;
        }

        r100_mc_disable_clients(rdev);
        if (r100_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait MC idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        r100_mc_setup(rdev);
        return 0;
}

void r100_mc_fini(struct radeon_device *rdev)
{
        r100_pci_gart_disable(rdev);
        radeon_gart_table_ram_free(rdev);
        radeon_gart_fini(rdev);
}


/*
 * Interrupts
 */
int r100_irq_set(struct radeon_device *rdev)
{
        uint32_t tmp = 0;

        if (rdev->irq.sw_int) {
                tmp |= RADEON_SW_INT_ENABLE;
        }
        if (rdev->irq.crtc_vblank_int[0]) {
                tmp |= RADEON_CRTC_VBLANK_MASK;
        }
        if (rdev->irq.crtc_vblank_int[1]) {
                tmp |= RADEON_CRTC2_VBLANK_MASK;
        }
        WREG32(RADEON_GEN_INT_CNTL, tmp);
        return 0;
}

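/* Reads the pending interrupt sources and acks them by writing the raw
 * status back to GEN_INT_STATUS (the bits appear to be write-1-to-clear);
 * only the sources this driver handles are returned to the caller.
 */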
static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
{
        uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
        uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
                RADEON_CRTC2_VBLANK_STAT;

        if (irqs) {
                WREG32(RADEON_GEN_INT_STATUS, irqs);
        }
        return irqs & irq_mask;
}

int r100_irq_process(struct radeon_device *rdev)
{
        uint32_t status;

        status = r100_irq_ack(rdev);
        if (!status) {
                return IRQ_NONE;
        }
        while (status) {
                /* SW interrupt */
                if (status & RADEON_SW_INT_TEST) {
                        radeon_fence_process(rdev);
                }
                /* Vertical blank interrupts */
                if (status & RADEON_CRTC_VBLANK_STAT) {
                        drm_handle_vblank(rdev->ddev, 0);
                }
                if (status & RADEON_CRTC2_VBLANK_STAT) {
                        drm_handle_vblank(rdev->ddev, 1);
                }
                status = r100_irq_ack(rdev);
        }
        return IRQ_HANDLED;
}

u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
        if (crtc == 0)
                return RREG32(RADEON_CRTC_CRNT_FRAME);
        else
                return RREG32(RADEON_CRTC2_CRNT_FRAME);
}


/*
 * Fence emission
 */
void r100_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
{
        /* Whoever calls radeon_fence_emit should call ring_lock and ask
         * for enough space (today the callers are ib schedule and buffer move) */
        /* Wait until IDLE & CLEAN */
        radeon_ring_write(rdev, PACKET0(0x1720, 0));
        radeon_ring_write(rdev, (1 << 16) | (1 << 17));
        /* Emit fence sequence & fire IRQ */
        radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
        radeon_ring_write(rdev, fence->seq);
        radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
        radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}


/*
 * Writeback
 */
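/* The writeback buffer gives the CP a place in GTT memory to mirror
 * scratch register values (and, when enabled, the ring read pointer),
 * so the CPU can poll plain memory instead of doing MMIO reads.
 */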
int r100_wb_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->wb.wb_obj == NULL) {
                r = radeon_object_create(rdev, NULL, 4096,
                                         true,
                                         RADEON_GEM_DOMAIN_GTT,
                                         false, &rdev->wb.wb_obj);
                if (r) {
                        DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
                        return r;
                }
                r = radeon_object_pin(rdev->wb.wb_obj,
                                      RADEON_GEM_DOMAIN_GTT,
                                      &rdev->wb.gpu_addr);
                if (r) {
                        DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
                        return r;
                }
                r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
                if (r) {
                        DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
                        return r;
                }
        }
        WREG32(RADEON_SCRATCH_ADDR, rdev->wb.gpu_addr);
        WREG32(RADEON_CP_RB_RPTR_ADDR, rdev->wb.gpu_addr + 1024);
        WREG32(RADEON_SCRATCH_UMSK, 0xff);
        return 0;
}

void r100_wb_fini(struct radeon_device *rdev)
{
        if (rdev->wb.wb_obj) {
                radeon_object_kunmap(rdev->wb.wb_obj);
                radeon_object_unpin(rdev->wb.wb_obj);
                radeon_object_unref(&rdev->wb.wb_obj);
                rdev->wb.wb = NULL;
                rdev->wb.wb_obj = NULL;
        }
}

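/* Copies whole pages with the 2D blitter by treating each batch as a
 * blit one page wide and cur_pages tall (hence the comment below that
 * pages run in the Y direction); the 8191-page clamp is presumably the
 * blitter's maximum extent per operation.
 */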
int r100_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
                   unsigned num_pages,
                   struct radeon_fence *fence)
{
        uint32_t cur_pages;
        uint32_t stride_bytes = PAGE_SIZE;
        uint32_t pitch;
        uint32_t stride_pixels;
        unsigned ndw;
        int num_loops;
        int r = 0;

        /* radeon is limited to a 16k stride */
        stride_bytes &= 0x3fff;
        /* radeon pitch is in units of 64 bytes */
        pitch = stride_bytes / 64;
        stride_pixels = stride_bytes / 4;
        num_loops = DIV_ROUND_UP(num_pages, 8191);

        /* Ask for enough room for blit + flush + fence */
        ndw = 64 + (10 * num_loops);
        r = radeon_ring_lock(rdev, ndw);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
                return -EINVAL;
        }
        while (num_pages > 0) {
                cur_pages = num_pages;
                if (cur_pages > 8191) {
                        cur_pages = 8191;
                }
                num_pages -= cur_pages;

                /* pages are in the Y direction - height,
                 * page width is in the X direction - width */
                radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
                radeon_ring_write(rdev,
                                  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
                                  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
                                  RADEON_GMC_SRC_CLIPPING |
                                  RADEON_GMC_DST_CLIPPING |
                                  RADEON_GMC_BRUSH_NONE |
                                  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
                                  RADEON_GMC_SRC_DATATYPE_COLOR |
                                  RADEON_ROP3_S |
                                  RADEON_DP_SRC_SOURCE_MEMORY |
                                  RADEON_GMC_CLR_CMP_CNTL_DIS |
                                  RADEON_GMC_WR_MSK_DIS);
                radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
                radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
                radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
                radeon_ring_write(rdev, 0);
                radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
                radeon_ring_write(rdev, num_pages);
                radeon_ring_write(rdev, num_pages);
                radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
        }
        radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_HOST_IDLECLEAN |
                          RADEON_WAIT_DMA_GUI_IDLE);
        if (fence) {
                r = radeon_fence_emit(rdev, fence);
        }
        radeon_ring_unlock_commit(rdev);
        return r;
}


/*
 * CP
 */
static int r100_cp_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        u32 tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(R_000E40_RBBM_STATUS);
                if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
                        return 0;
                }
                udelay(1);
        }
        return -1;
}

void r100_ring_start(struct radeon_device *rdev)
{
        int r;

        r = radeon_ring_lock(rdev, 2);
        if (r) {
                return;
        }
        radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
        radeon_ring_write(rdev,
                          RADEON_ISYNC_ANY2D_IDLE3D |
                          RADEON_ISYNC_ANY3D_IDLE2D |
                          RADEON_ISYNC_WAIT_IDLEGUI |
                          RADEON_ISYNC_CPSCRATCH_IDLEGUI);
        radeon_ring_unlock_commit(rdev);
}


/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
        struct platform_device *pdev;
        const char *fw_name = NULL;
        int err;

        DRM_DEBUG("\n");

        pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
        err = IS_ERR(pdev);
        if (err) {
                printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
                return -EINVAL;
        }
        if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
            (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
            (rdev->family == CHIP_RS200)) {
                DRM_INFO("Loading R100 Microcode\n");
                fw_name = FIRMWARE_R100;
        } else if ((rdev->family == CHIP_R200) ||
                   (rdev->family == CHIP_RV250) ||
                   (rdev->family == CHIP_RV280) ||
                   (rdev->family == CHIP_RS300)) {
                DRM_INFO("Loading R200 Microcode\n");
                fw_name = FIRMWARE_R200;
        } else if ((rdev->family == CHIP_R300) ||
                   (rdev->family == CHIP_R350) ||
                   (rdev->family == CHIP_RV350) ||
                   (rdev->family == CHIP_RV380) ||
                   (rdev->family == CHIP_RS400) ||
                   (rdev->family == CHIP_RS480)) {
                DRM_INFO("Loading R300 Microcode\n");
                fw_name = FIRMWARE_R300;
        } else if ((rdev->family == CHIP_R420) ||
                   (rdev->family == CHIP_R423) ||
                   (rdev->family == CHIP_RV410)) {
                DRM_INFO("Loading R400 Microcode\n");
                fw_name = FIRMWARE_R420;
        } else if ((rdev->family == CHIP_RS690) ||
                   (rdev->family == CHIP_RS740)) {
                DRM_INFO("Loading RS690/RS740 Microcode\n");
                fw_name = FIRMWARE_RS690;
        } else if (rdev->family == CHIP_RS600) {
                DRM_INFO("Loading RS600 Microcode\n");
                fw_name = FIRMWARE_RS600;
        } else if ((rdev->family == CHIP_RV515) ||
                   (rdev->family == CHIP_R520) ||
                   (rdev->family == CHIP_RV530) ||
                   (rdev->family == CHIP_R580) ||
                   (rdev->family == CHIP_RV560) ||
                   (rdev->family == CHIP_RV570)) {
                DRM_INFO("Loading R500 Microcode\n");
                fw_name = FIRMWARE_R520;
        }

        err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
        platform_device_unregister(pdev);
        if (err) {
                printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
                       fw_name);
        } else if (rdev->me_fw->size % 8) {
                printk(KERN_ERR
                       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
                       rdev->me_fw->size, fw_name);
                err = -EINVAL;
                release_firmware(rdev->me_fw);
                rdev->me_fw = NULL;
        }
        return err;
}

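/* The CP microcode image is a stream of 64-bit big-endian words; after
 * CP_ME_RAM_ADDR is set to 0, each DATAH/DATAL write pair below loads
 * one word, the RAM address presumably auto-incrementing per pair.
 */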
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
        const __be32 *fw_data;
        int i, size;

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        if (rdev->me_fw) {
                size = rdev->me_fw->size / 4;
                fw_data = (const __be32 *)&rdev->me_fw->data[0];
                WREG32(RADEON_CP_ME_RAM_ADDR, 0);
                for (i = 0; i < size; i += 2) {
                        WREG32(RADEON_CP_ME_RAM_DATAH,
                               be32_to_cpup(&fw_data[i]));
                        WREG32(RADEON_CP_ME_RAM_DATAL,
                               be32_to_cpup(&fw_data[i + 1]));
                }
        }
}

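/* Note on the ring sizing below: drm_order() returns the smallest n such
 * that 2^n >= its argument, so a requested ring_size of 1MB gives
 * rb_bufsz = drm_order(1MB / 8) = 17 and the size is rounded back up to
 * (1 << 18) * 4 bytes = 1MB; the ring always ends up a power of two.
 */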
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
        unsigned rb_bufsz;
        unsigned rb_blksz;
        unsigned max_fetch;
        unsigned pre_write_timer;
        unsigned pre_write_limit;
        unsigned indirect2_start;
        unsigned indirect1_start;
        uint32_t tmp;
        int r;

        if (r100_debugfs_cp_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for CP !\n");
        }
        /* Reset CP */
        tmp = RREG32(RADEON_CP_CSQ_STAT);
        if ((tmp & (1 << 31))) {
                DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
                WREG32(RADEON_CP_CSQ_MODE, 0);
                WREG32(RADEON_CP_CSQ_CNTL, 0);
                WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
                tmp = RREG32(RADEON_RBBM_SOFT_RESET);
                mdelay(2);
                WREG32(RADEON_RBBM_SOFT_RESET, 0);
                tmp = RREG32(RADEON_RBBM_SOFT_RESET);
                mdelay(2);
                tmp = RREG32(RADEON_CP_CSQ_STAT);
                if ((tmp & (1 << 31))) {
                        DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
                }
        } else {
                DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
        }

        if (!rdev->me_fw) {
                r = r100_cp_init_microcode(rdev);
                if (r) {
                        DRM_ERROR("Failed to load firmware!\n");
                        return r;
                }
        }

        /* Align ring size */
        rb_bufsz = drm_order(ring_size / 8);
        ring_size = (1 << (rb_bufsz + 1)) * 4;
        r100_cp_load_microcode(rdev);
        r = radeon_ring_init(rdev, ring_size);
        if (r) {
                return r;
        }
        /* Each time the cp reads 1024 bytes (16 dword/quadword), update
         * the rptr copy in system ram */
        rb_blksz = 9;
        /* cp will read 128 bytes at a time (4 dwords) */
        max_fetch = 1;
        rdev->cp.align_mask = 16 - 1;
        /* Writes to CP_RB_WPTR will be delayed for pre_write_timer clocks */
        pre_write_timer = 64;
        /* Force a CP_RB_WPTR write if it is written more than once before
         * the delay expires
         */
        pre_write_limit = 0;
        /* Setup the cp cache like this (cache size is 96 dwords) :
         *      RING            0  to 15
         *      INDIRECT1       16 to 79
         *      INDIRECT2       80 to 95
         * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
         *    indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
         *    indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
         * Idea being that most of the gpu cmd will be through indirect1 buffer
         * so it gets the bigger cache.
         */
        indirect2_start = 80;
        indirect1_start = 16;
        /* cp setup */
        WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
        WREG32(RADEON_CP_RB_CNTL,
#ifdef __BIG_ENDIAN
               RADEON_BUF_SWAP_32BIT |
#endif
               REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
               REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
               REG_SET(RADEON_MAX_FETCH, max_fetch) |
               RADEON_RB_NO_UPDATE);
        /* Set ring address */
        DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
        WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
        /* Force read & write ptr to 0 */
        tmp = RREG32(RADEON_CP_RB_CNTL);
        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
        WREG32(RADEON_CP_RB_RPTR_WR, 0);
        WREG32(RADEON_CP_RB_WPTR, 0);
        WREG32(RADEON_CP_RB_CNTL, tmp);
        udelay(10);
        rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
        rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
        /* Set cp mode to bus mastering & enable cp */
        WREG32(RADEON_CP_CSQ_MODE,
               REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
               REG_SET(RADEON_INDIRECT1_START, indirect1_start));
        WREG32(0x718, 0);
        WREG32(0x744, 0x00004D4D);
        WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
        radeon_ring_start(rdev);
        r = radeon_ring_test(rdev);
        if (r) {
                DRM_ERROR("radeon: cp isn't working (%d).\n", r);
                return r;
        }
        rdev->cp.ready = true;
        return 0;
}

void r100_cp_fini(struct radeon_device *rdev)
{
        if (r100_cp_wait_for_idle(rdev)) {
                DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
        }
        /* Disable ring */
        r100_cp_disable(rdev);
        radeon_ring_fini(rdev);
        DRM_INFO("radeon: cp finalized\n");
}

void r100_cp_disable(struct radeon_device *rdev)
{
        /* Disable ring */
        rdev->cp.ready = false;
        WREG32(RADEON_CP_CSQ_MODE, 0);
        WREG32(RADEON_CP_CSQ_CNTL, 0);
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }
}

int r100_cp_reset(struct radeon_device *rdev)
{
        uint32_t tmp;
        bool reinit_cp;
        int i;

        reinit_cp = rdev->cp.ready;
        rdev->cp.ready = false;
        WREG32(RADEON_CP_CSQ_MODE, 0);
        WREG32(RADEON_CP_CSQ_CNTL, 0);
        WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
        (void)RREG32(RADEON_RBBM_SOFT_RESET);
        udelay(200);
        WREG32(RADEON_RBBM_SOFT_RESET, 0);
        /* Wait to prevent race in RBBM_STATUS */
        mdelay(1);
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (!(tmp & (1 << 16))) {
                        DRM_INFO("CP reset succeeded (RBBM_STATUS=0x%08X)\n",
                                 tmp);
                        if (reinit_cp) {
                                return r100_cp_init(rdev, rdev->cp.ring_size);
                        }
                        return 0;
                }
                DRM_UDELAY(1);
        }
        tmp = RREG32(RADEON_RBBM_STATUS);
        DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
        return -1;
}

void r100_cp_commit(struct radeon_device *rdev)
{
        WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
        (void)RREG32(RADEON_CP_RB_WPTR);
}


/*
 * CS functions
 */
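/* The safe-register table is a bitmap with one bit per dword-aligned
 * register: auth[reg >> 7] picks the 32-bit word (each word covers 128
 * bytes of register space) and bit (reg >> 2) & 31 picks the register
 * within it.  A set bit means the write is not blindly allowed and must
 * go through the chip-specific check() callback.
 */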
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
                          struct radeon_cs_packet *pkt,
                          const unsigned *auth, unsigned n,
                          radeon_packet0_check_t check)
{
        unsigned reg;
        unsigned i, j, m;
        unsigned idx;
        int r;

        idx = pkt->idx + 1;
        reg = pkt->reg;
        /* Check that the register falls into the register range
         * determined by the number of entries (n) in the
         * safe register bitmap.
         */
        if (pkt->one_reg_wr) {
                if ((reg >> 7) > n) {
                        return -EINVAL;
                }
        } else {
                if (((reg + (pkt->count << 2)) >> 7) > n) {
                        return -EINVAL;
                }
        }
        for (i = 0; i <= pkt->count; i++, idx++) {
                j = (reg >> 7);
                m = 1 << ((reg >> 2) & 31);
                if (auth[j] & m) {
                        r = check(p, pkt, idx, reg);
                        if (r) {
                                return r;
                        }
                }
                if (pkt->one_reg_wr) {
                        if (!(auth[j] & m)) {
                                break;
                        }
                } else {
                        reg += 4;
                }
        }
        return 0;
}

void r100_cs_dump_packet(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt)
{
        struct radeon_cs_chunk *ib_chunk;
        volatile uint32_t *ib;
        unsigned i;
        unsigned idx;

        ib = p->ib->ptr;
        ib_chunk = &p->chunks[p->chunk_ib_idx];
        idx = pkt->idx;
        for (i = 0; i <= (pkt->count + 1); i++, idx++) {
                DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
        }
}

/**
 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser: parser structure holding parsing context.
 * @pkt: where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the
 * packet is unknown.
 **/
int r100_cs_packet_parse(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt,
                         unsigned idx)
{
        struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
        uint32_t header;

        if (idx >= ib_chunk->length_dw) {
                DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
                          idx, ib_chunk->length_dw);
                return -EINVAL;
        }
        header = ib_chunk->kdata[idx];
        pkt->idx = idx;
        pkt->type = CP_PACKET_GET_TYPE(header);
        pkt->count = CP_PACKET_GET_COUNT(header);
        switch (pkt->type) {
        case PACKET_TYPE0:
                pkt->reg = CP_PACKET0_GET_REG(header);
                pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
                break;
        case PACKET_TYPE3:
                pkt->opcode = CP_PACKET3_GET_OPCODE(header);
                break;
        case PACKET_TYPE2:
                pkt->count = -1;
                break;
        default:
                DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
                return -EINVAL;
        }
        if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
                DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
                          pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
                return -EINVAL;
        }
        return 0;
}

/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched-off crtc and nulls out the
 * wait in that case.
 */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
        struct radeon_cs_chunk *ib_chunk;
        struct drm_mode_object *obj;
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;
        struct radeon_cs_packet p3reloc, waitreloc;
        int crtc_id;
        int r;
        uint32_t header, h_idx, reg;

        ib_chunk = &p->chunks[p->chunk_ib_idx];

        /* parse the wait until */
        r = r100_cs_packet_parse(p, &waitreloc, p->idx);
        if (r)
                return r;

        /* check it's a wait until and only 1 count */
        if (waitreloc.reg != RADEON_WAIT_UNTIL ||
            waitreloc.count != 0) {
                DRM_ERROR("vline wait had illegal wait until segment\n");
                r = -EINVAL;
                return r;
        }

        if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) {
                DRM_ERROR("vline wait had illegal wait until\n");
                r = -EINVAL;
                return r;
        }

        /* jump over the NOP */
        r = r100_cs_packet_parse(p, &p3reloc, p->idx);
        if (r)
                return r;

        h_idx = p->idx - 2;
        p->idx += waitreloc.count;
        p->idx += p3reloc.count;

        header = ib_chunk->kdata[h_idx];
        crtc_id = ib_chunk->kdata[h_idx + 5];
        reg = ib_chunk->kdata[h_idx] >> 2;
        mutex_lock(&p->rdev->ddev->mode_config.mutex);
        obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
        if (!obj) {
                DRM_ERROR("cannot find crtc %d\n", crtc_id);
                r = -EINVAL;
                goto out;
        }
        crtc = obj_to_crtc(obj);
        radeon_crtc = to_radeon_crtc(crtc);
        crtc_id = radeon_crtc->crtc_id;

        if (!crtc->enabled) {
                /* if the CRTC isn't enabled - we need to nop out the wait until */
                ib_chunk->kdata[h_idx + 2] = PACKET2(0);
                ib_chunk->kdata[h_idx + 3] = PACKET2(0);
        } else if (crtc_id == 1) {
                switch (reg) {
                case AVIVO_D1MODE_VLINE_START_END:
                        header &= R300_CP_PACKET0_REG_MASK;
                        header |= AVIVO_D2MODE_VLINE_START_END >> 2;
                        break;
                case RADEON_CRTC_GUI_TRIG_VLINE:
                        header &= R300_CP_PACKET0_REG_MASK;
                        header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
                        break;
                default:
                        DRM_ERROR("unknown crtc reloc\n");
                        r = -EINVAL;
                        goto out;
                }
                ib_chunk->kdata[h_idx] = header;
                ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
        }
out:
        mutex_unlock(&p->rdev->ddev->mode_config.mutex);
        return r;
}

/**
 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @parser: parser structure holding parsing context.
 * @data: pointer to relocation data
 * @offset_start: starting offset
 * @offset_mask: offset mask (to align start offset on)
 * @reloc: reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation
 * and compute GPU offset using the provided start.
 **/
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
                              struct radeon_cs_reloc **cs_reloc)
{
        struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_packet p3reloc;
        unsigned idx;
        int r;

        if (p->chunk_relocs_idx == -1) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
        *cs_reloc = NULL;
        ib_chunk = &p->chunks[p->chunk_ib_idx];
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
        r = r100_cs_packet_parse(p, &p3reloc, p->idx);
        if (r) {
                return r;
        }
        p->idx += p3reloc.count + 2;
        if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
                DRM_ERROR("No packet3 for relocation for packet at %d.\n",
                          p3reloc.idx);
                r100_cs_dump_packet(p, &p3reloc);
                return -EINVAL;
        }
        idx = ib_chunk->kdata[p3reloc.idx + 1];
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
                r100_cs_dump_packet(p, &p3reloc);
                return -EINVAL;
        }
        /* FIXME: we assume reloc size is 4 dwords */
        *cs_reloc = p->relocs_ptr[(idx / 4)];
        return 0;
}

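/* Computes the size of one vertex in dwords from the SE_VTX_FMT bits;
 * the base size of 2 presumably covers the mandatory X/Y position, and
 * each enabled component then adds its own dword count.
 */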
static int r100_get_vtx_size(uint32_t vtx_fmt)
{
        int vtx_size;

        vtx_size = 2;
        /* ordered according to bits in spec */
        if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
                vtx_size++;
        if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
                vtx_size += 3;
        if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
                vtx_size++;
        if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
                vtx_size++;
        if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
                vtx_size += 3;
        if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
                vtx_size++;
        if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
                vtx_size++;
        if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
                vtx_size += 2;
        if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
                vtx_size += 2;
        if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
                vtx_size++;
        if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
                vtx_size += 2;
        if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
                vtx_size++;
        if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
                vtx_size += 2;
        if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
                vtx_size++;
        if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
                vtx_size++;
        /* blend weight */
        if (vtx_fmt & (0x7 << 15))
                vtx_size += (vtx_fmt >> 15) & 0x7;
        if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
                vtx_size += 3;
        if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
                vtx_size += 2;
        if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
                vtx_size++;
        if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
                vtx_size++;
        if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
                vtx_size++;
        if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
                vtx_size++;
        return vtx_size;
}

static int r100_packet0_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt,
                              unsigned idx, unsigned reg)
{
        struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        uint32_t tmp;
        int r;
        int i, face;
        u32 tile_flags = 0;

        ib = p->ib->ptr;
        ib_chunk = &p->chunks[p->chunk_ib_idx];
        track = (struct r100_cs_track *)p->track;

        switch (reg) {
        case RADEON_CRTC_GUI_TRIG_VLINE:
                r = r100_cs_packet_parse_vline(p);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                break;
                /* FIXME: only allow PACKET3 blit? easier to check for out of
                 * range access */
        case RADEON_DST_PITCH_OFFSET:
        case RADEON_SRC_PITCH_OFFSET:
                r = r100_reloc_pitch_offset(p, pkt, idx, reg);
                if (r)
                        return r;
                break;
        case RADEON_RB3D_DEPTHOFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->zb.robj = reloc->robj;
                track->zb.offset = ib_chunk->kdata[idx];
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                break;
        case RADEON_RB3D_COLOROFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->cb[0].robj = reloc->robj;
                track->cb[0].offset = ib_chunk->kdata[idx];
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                break;
        case RADEON_PP_TXOFFSET_0:
        case RADEON_PP_TXOFFSET_1:
        case RADEON_PP_TXOFFSET_2:
                i = (reg - RADEON_PP_TXOFFSET_0) / 24;
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                track->textures[i].robj = reloc->robj;
                break;
        case RADEON_PP_CUBIC_OFFSET_T0_0:
        case RADEON_PP_CUBIC_OFFSET_T0_1:
        case RADEON_PP_CUBIC_OFFSET_T0_2:
        case RADEON_PP_CUBIC_OFFSET_T0_3:
        case RADEON_PP_CUBIC_OFFSET_T0_4:
                i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx];
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                track->textures[0].cube_info[i].robj = reloc->robj;
                break;
        case RADEON_PP_CUBIC_OFFSET_T1_0:
        case RADEON_PP_CUBIC_OFFSET_T1_1:
        case RADEON_PP_CUBIC_OFFSET_T1_2:
        case RADEON_PP_CUBIC_OFFSET_T1_3:
        case RADEON_PP_CUBIC_OFFSET_T1_4:
                i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx];
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                track->textures[1].cube_info[i].robj = reloc->robj;
                break;
        case RADEON_PP_CUBIC_OFFSET_T2_0:
        case RADEON_PP_CUBIC_OFFSET_T2_1:
        case RADEON_PP_CUBIC_OFFSET_T2_2:
        case RADEON_PP_CUBIC_OFFSET_T2_3:
        case RADEON_PP_CUBIC_OFFSET_T2_4:
                i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx];
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                track->textures[2].cube_info[i].robj = reloc->robj;
                break;
        case RADEON_RE_WIDTH_HEIGHT:
                track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF);
                break;
        case RADEON_RB3D_COLORPITCH:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }

                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= RADEON_COLOR_TILE_ENABLE;
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

                tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
                tmp |= tile_flags;
                ib[idx] = tmp;

                track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK;
                break;
        case RADEON_RB3D_DEPTHPITCH:
                track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK;
                break;
        case RADEON_RB3D_CNTL:
                switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
                case 7:
                case 8:
                case 9:
                case 11:
                case 12:
                        track->cb[0].cpp = 1;
                        break;
                case 3:
                case 4:
                case 15:
                        track->cb[0].cpp = 2;
                        break;
                case 6:
                        track->cb[0].cpp = 4;
                        break;
                default:
                        DRM_ERROR("Invalid color buffer format (%d) !\n",
                                  ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
                        return -EINVAL;
                }
                track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE);
                break;
        case RADEON_RB3D_ZSTENCILCNTL:
                switch (ib_chunk->kdata[idx] & 0xf) {
                case 0:
                        track->zb.cpp = 2;
                        break;
                case 2:
                case 3:
                case 4:
                case 5:
                case 9:
                case 11:
                        track->zb.cpp = 4;
                        break;
                default:
                        break;
                }
                break;
        case RADEON_RB3D_ZPASS_ADDR:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                break;
        case RADEON_PP_CNTL:
                {
                        uint32_t temp = ib_chunk->kdata[idx] >> 4;
                        for (i = 0; i < track->num_texture; i++)
                                track->textures[i].enabled = !!(temp & (1 << i));
                }
                break;
        case RADEON_SE_VF_CNTL:
                track->vap_vf_cntl = ib_chunk->kdata[idx];
                break;
        case RADEON_SE_VTX_FMT:
                track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]);
                break;
        case RADEON_PP_TEX_SIZE_0:
        case RADEON_PP_TEX_SIZE_1:
        case RADEON_PP_TEX_SIZE_2:
                i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
                track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1;
                track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
                break;
        case RADEON_PP_TEX_PITCH_0:
        case RADEON_PP_TEX_PITCH_1:
        case RADEON_PP_TEX_PITCH_2:
                i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
                track->textures[i].pitch = ib_chunk->kdata[idx] + 32;
                break;
        case RADEON_PP_TXFILTER_0:
        case RADEON_PP_TXFILTER_1:
        case RADEON_PP_TXFILTER_2:
                i = (reg - RADEON_PP_TXFILTER_0) / 24;
                track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK)
                                                 >> RADEON_MAX_MIP_LEVEL_SHIFT);
                tmp = (ib_chunk->kdata[idx] >> 23) & 0x7;
                if (tmp == 2 || tmp == 6)
                        track->textures[i].roundup_w = false;
                tmp = (ib_chunk->kdata[idx] >> 27) & 0x7;
                if (tmp == 2 || tmp == 6)
                        track->textures[i].roundup_h = false;
                break;
        case RADEON_PP_TXFORMAT_0:
        case RADEON_PP_TXFORMAT_1:
        case RADEON_PP_TXFORMAT_2:
                i = (reg - RADEON_PP_TXFORMAT_0) / 24;
                if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) {
                        track->textures[i].use_pitch = 1;
                } else {
                        track->textures[i].use_pitch = 0;
                        track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
                        track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
                }
                if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
                        track->textures[i].tex_coord_type = 2;
                switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) {
                case RADEON_TXFORMAT_I8:
                case RADEON_TXFORMAT_RGB332:
                case RADEON_TXFORMAT_Y8:
                        track->textures[i].cpp = 1;
                        break;
                case RADEON_TXFORMAT_AI88:
                case RADEON_TXFORMAT_ARGB1555:
                case RADEON_TXFORMAT_RGB565:
                case RADEON_TXFORMAT_ARGB4444:
                case RADEON_TXFORMAT_VYUY422:
                case RADEON_TXFORMAT_YVYU422:
                case RADEON_TXFORMAT_DXT1:
                case RADEON_TXFORMAT_SHADOW16:
                case RADEON_TXFORMAT_LDUDV655:
                case RADEON_TXFORMAT_DUDV88:
                        track->textures[i].cpp = 2;
                        break;
                case RADEON_TXFORMAT_ARGB8888:
                case RADEON_TXFORMAT_RGBA8888:
                case RADEON_TXFORMAT_DXT23:
                case RADEON_TXFORMAT_DXT45:
                case RADEON_TXFORMAT_SHADOW32:
                case RADEON_TXFORMAT_LDUDUV8888:
                        track->textures[i].cpp = 4;
                        break;
                }
                track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf);
                track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf);
                break;
        case RADEON_PP_CUBIC_FACES_0:
        case RADEON_PP_CUBIC_FACES_1:
        case RADEON_PP_CUBIC_FACES_2:
                tmp = ib_chunk->kdata[idx];
                i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
                for (face = 0; face < 4; face++) {
                        track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
                        track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
                }
                break;
        default:
                printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
                       reg, idx);
                return -EINVAL;
        }
        return 0;
}

int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
                                         struct radeon_cs_packet *pkt,
                                         struct radeon_object *robj)
{
        struct radeon_cs_chunk *ib_chunk;
        unsigned idx;

        ib_chunk = &p->chunks[p->chunk_ib_idx];
        idx = pkt->idx + 1;
        if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) {
                DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
                          "(need %u have %lu) !\n",
                          ib_chunk->kdata[idx+2] + 1,
                          radeon_object_size(robj));
                return -EINVAL;
        }
        return 0;
}

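/* Note on PACKET3_3D_LOAD_VBPNTR below: after the array-count dword,
 * vertex arrays appear to be packed two per three-dword group (one dword
 * carrying both element sizes, then one address dword per array), with a
 * final two-dword group when the count is odd.  That is why the loop
 * advances idx by 3 per pair and patches both ib[idx+1] and ib[idx+2]
 * with relocated addresses.
 */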
static int r100_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
{
        struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        unsigned idx;
        unsigned i, c;
        volatile uint32_t *ib;
        int r;

        ib = p->ib->ptr;
        ib_chunk = &p->chunks[p->chunk_ib_idx];
        idx = pkt->idx + 1;
        track = (struct r100_cs_track *)p->track;
        switch (pkt->opcode) {
        case PACKET3_3D_LOAD_VBPNTR:
                c = ib_chunk->kdata[idx++];
                track->num_arrays = c;
                for (i = 0; i < (c - 1); i += 2, idx += 3) {
                        r = r100_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("No reloc for packet3 %d\n",
                                          pkt->opcode);
                                r100_cs_dump_packet(p, pkt);
                                return r;
                        }
                        ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
                        track->arrays[i + 0].robj = reloc->robj;
                        track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
                        track->arrays[i + 0].esize &= 0x7F;
                        r = r100_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("No reloc for packet3 %d\n",
                                          pkt->opcode);
                                r100_cs_dump_packet(p, pkt);
                                return r;
                        }
                        ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
                        track->arrays[i + 1].robj = reloc->robj;
                        track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
                        track->arrays[i + 1].esize &= 0x7F;
                }
                if (c & 1) {
                        r = r100_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("No reloc for packet3 %d\n",
                                          pkt->opcode);
                                r100_cs_dump_packet(p, pkt);
                                return r;
                        }
                        ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
                        track->arrays[i + 0].robj = reloc->robj;
                        track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
                        track->arrays[i + 0].esize &= 0x7F;
                }
                break;
        case PACKET3_INDX_BUFFER:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
                r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
                if (r) {
                        return r;
                }
                break;
        case 0x23:
                /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                track->num_arrays = 1;
                track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]);

                track->arrays[0].robj = reloc->robj;
                track->arrays[0].esize = track->vtx_size;

                track->max_indx = ib_chunk->kdata[idx+1];

                track->vap_vf_cntl = ib_chunk->kdata[idx+3];
                track->immd_dwords = pkt->count - 1;
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
                break;
        case PACKET3_3D_DRAW_IMMD:
                if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
                track->vap_vf_cntl = ib_chunk->kdata[idx+1];
                track->immd_dwords = pkt->count - 1;
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
                break;
                /* triggers drawing using in-packet vertex data */
        case PACKET3_3D_DRAW_IMMD_2:
                if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
                track->vap_vf_cntl = ib_chunk->kdata[idx];
                track->immd_dwords = pkt->count;
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
                break;
                /* triggers drawing using in-packet vertex data */
        case PACKET3_3D_DRAW_VBUF_2:
                track->vap_vf_cntl = ib_chunk->kdata[idx];
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
                break;
                /* triggers drawing of vertex buffers setup elsewhere */
        case PACKET3_3D_DRAW_INDX_2:
                track->vap_vf_cntl = ib_chunk->kdata[idx];
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
                break;
                /* triggers drawing using indices to vertex buffer */
        case PACKET3_3D_DRAW_VBUF:
                track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
                break;
                /* triggers drawing of vertex buffers setup elsewhere */
        case PACKET3_3D_DRAW_INDX:
                track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
                break;
                /* triggers drawing using indices to vertex buffer */
        case PACKET3_NOP:
                break;
        default:
                DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
                return -EINVAL;
        }
        return 0;
}

int r100_cs_parse(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet pkt;
        struct r100_cs_track track;
        int r;

        r100_cs_track_clear(p->rdev, &track);
        p->track = &track;
        do {
                r = r100_cs_packet_parse(p, &pkt, p->idx);
                if (r) {
                        return r;
                }
                p->idx += pkt.count + 2;
                switch (pkt.type) {
                case PACKET_TYPE0:
                        if (p->rdev->family >= CHIP_R200)
                                r = r100_cs_parse_packet0(p, &pkt,
                                                          p->rdev->config.r100.reg_safe_bm,
                                                          p->rdev->config.r100.reg_safe_bm_size,
                                                          &r200_packet0_check);
                        else
                                r = r100_cs_parse_packet0(p, &pkt,
                                                          p->rdev->config.r100.reg_safe_bm,
                                                          p->rdev->config.r100.reg_safe_bm_size,
                                                          &r100_packet0_check);
                        break;
                case PACKET_TYPE2:
                        break;
                case PACKET_TYPE3:
                        r = r100_packet3_check(p, &pkt);
                        break;
                default:
                        DRM_ERROR("Unknown packet type %d !\n",
                                  pkt.type);
                        return -EINVAL;
                }
                if (r) {
                        return r;
                }
        } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
        return 0;
}


/*
 * Global GPU functions
 */
void r100_errata(struct radeon_device *rdev)
{
        rdev->pll_errata = 0;

        if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
                rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
        }

        if (rdev->family == CHIP_RV100 ||
            rdev->family == CHIP_RS100 ||
            rdev->family == CHIP_RS200) {
                rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
        }
}

1642void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
1643{
1644 uint32_t crtc_gen_cntl, tmp;
1645 int i;
1646
1647 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
1648 if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
1649 !(crtc_gen_cntl & RADEON_CRTC_EN)) {
1650 return;
1651 }
1652 /* Clear the CRTC_VBLANK_SAVE bit */
1653 WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
1654 for (i = 0; i < rdev->usec_timeout; i++) {
1655 tmp = RREG32(RADEON_CRTC_STATUS);
1656 if (tmp & RADEON_CRTC_VBLANK_SAVE) {
1657 return;
1658 }
1659 DRM_UDELAY(1);
1660 }
1661}
1662
1663/* Wait for vertical sync on secondary CRTC */
1664void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
1665{
1666 uint32_t crtc2_gen_cntl, tmp;
1667 int i;
1668
1669 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
1670 if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
1671 !(crtc2_gen_cntl & RADEON_CRTC2_EN))
1672 return;
1673
1674 /* Clear the CRTC_VBLANK_SAVE bit */
1675 WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
1676 for (i = 0; i < rdev->usec_timeout; i++) {
1677 tmp = RREG32(RADEON_CRTC2_STATUS);
1678 if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
1679 return;
1680 }
1681 DRM_UDELAY(1);
1682 }
1683}
1684
int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
{
        unsigned i;
        uint32_t tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
                if (tmp >= n) {
                        return 0;
                }
                DRM_UDELAY(1);
        }
        return -1;
}

int r100_gui_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        uint32_t tmp;

        if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
                printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
                       " Bad things might happen.\n");
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (!(tmp & (1 << 31))) {
                        return 0;
                }
                DRM_UDELAY(1);
        }
        return -1;
}

int r100_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        uint32_t tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32(0x0150);
                if (tmp & (1 << 2)) {
                        return 0;
                }
                DRM_UDELAY(1);
        }
        return -1;
}

void r100_gpu_init(struct radeon_device *rdev)
{
        /* TODO: anything to do here? pipes? */
        r100_hdp_reset(rdev);
}

void r100_hdp_reset(struct radeon_device *rdev)
{
        uint32_t tmp;

        tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
        tmp |= (7 << 28);
        WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
        (void)RREG32(RADEON_HOST_PATH_CNTL);
        udelay(200);
        WREG32(RADEON_RBBM_SOFT_RESET, 0);
        WREG32(RADEON_HOST_PATH_CNTL, tmp);
        (void)RREG32(RADEON_HOST_PATH_CNTL);
}

int r100_rb2d_reset(struct radeon_device *rdev)
{
        uint32_t tmp;
        int i;

        WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
        (void)RREG32(RADEON_RBBM_SOFT_RESET);
        udelay(200);
        WREG32(RADEON_RBBM_SOFT_RESET, 0);
        /* Wait to prevent race in RBBM_STATUS */
        mdelay(1);
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (!(tmp & (1 << 26))) {
                        DRM_INFO("RB2D reset succeeded (RBBM_STATUS=0x%08X)\n",
                                 tmp);
                        return 0;
                }
                DRM_UDELAY(1);
        }
        tmp = RREG32(RADEON_RBBM_STATUS);
        DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
        return -1;
}

int r100_gpu_reset(struct radeon_device *rdev)
{
        uint32_t status;

        /* reset order likely matters */
        status = RREG32(RADEON_RBBM_STATUS);
        /* reset HDP */
        r100_hdp_reset(rdev);
        /* reset rb2d */
        if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
                r100_rb2d_reset(rdev);
        }
        /* TODO: reset 3D engine */
        /* reset CP */
        status = RREG32(RADEON_RBBM_STATUS);
        if (status & (1 << 16)) {
                r100_cp_reset(rdev);
        }
        /* Check if GPU is idle */
        status = RREG32(RADEON_RBBM_STATUS);
        if (status & (1 << 31)) {
                DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
                return -1;
        }
        DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
        return 0;
}
1807
1808
1809/*
1810 * VRAM info
1811 */
1812static void r100_vram_get_type(struct radeon_device *rdev)
1813{
1814 uint32_t tmp;
1815
1816 rdev->mc.vram_is_ddr = false;
1817 if (rdev->flags & RADEON_IS_IGP)
1818 rdev->mc.vram_is_ddr = true;
1819 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
1820 rdev->mc.vram_is_ddr = true;
1821 if ((rdev->family == CHIP_RV100) ||
1822 (rdev->family == CHIP_RS100) ||
1823 (rdev->family == CHIP_RS200)) {
1824 tmp = RREG32(RADEON_MEM_CNTL);
1825 if (tmp & RV100_HALF_MODE) {
1826 rdev->mc.vram_width = 32;
1827 } else {
1828 rdev->mc.vram_width = 64;
1829 }
1830 if (rdev->flags & RADEON_SINGLE_CRTC) {
1831 rdev->mc.vram_width /= 4;
1832 rdev->mc.vram_is_ddr = true;
1833 }
1834 } else if (rdev->family <= CHIP_RV280) {
1835 tmp = RREG32(RADEON_MEM_CNTL);
1836 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
1837 rdev->mc.vram_width = 128;
1838 } else {
1839 rdev->mc.vram_width = 64;
1840 }
1841 } else {
1842 /* newer IGPs */
1843 rdev->mc.vram_width = 128;
1844 }
1845}
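/* Example: an RV100-class chip in half mode reports a 32-bit bus; with
 * RADEON_SINGLE_CRTC also set, the width above is divided by 4, giving
 * vram_width = 8 with the memory treated as DDR.
 */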
1846
1847static u32 r100_get_accessible_vram(struct radeon_device *rdev)
1848{
1849 u32 aper_size;
1850 u8 byte;
1851
1852 aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
1853
1854 /* Set HDP_APER_CNTL only on cards that are known not to be broken,
1855	 * that is, those with the 2nd generation multifunction PCI interface
1856 */
1857 if (rdev->family == CHIP_RV280 ||
1858 rdev->family >= CHIP_RV350) {
1859 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
1860 ~RADEON_HDP_APER_CNTL);
1861 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
1862 return aper_size * 2;
1863 }
1864
1865 /* Older cards have all sorts of funny issues to deal with. First
1866 * check if it's a multifunction card by reading the PCI config
1867 * header type... Limit those to one aperture size
1868 */
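	/* config offset 0xe is PCI_HEADER_TYPE; bit 7 flags a multifunction device */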
1869 pci_read_config_byte(rdev->pdev, 0xe, &byte);
1870 if (byte & 0x80) {
1871 DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
1872 DRM_INFO("Limiting VRAM to one aperture\n");
1873 return aper_size;
1874 }
1875
1876 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS
1877	 * has set it up. We don't write this as it's broken on some ASICs but
1878 * we expect the BIOS to have done the right thing (might be too optimistic...)
1879 */
1880 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
1881 return aper_size * 2;
1882 return aper_size;
1883}
1884
1885void r100_vram_init_sizes(struct radeon_device *rdev)
1886{
1887 u64 config_aper_size;
1888 u32 accessible;
1889
1890 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
1891
1892 if (rdev->flags & RADEON_IS_IGP) {
1893 uint32_t tom;
1894 /* read NB_TOM to get the amount of ram stolen for the GPU */
1895 tom = RREG32(RADEON_NB_TOM);
1896		rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
1897 /* for IGPs we need to keep VRAM where it was put by the BIOS */
1898 rdev->mc.vram_location = (tom & 0xffff) << 16;
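		/* e.g. NB_TOM = 0x03ff0000: top 0x3ff, bottom 0, so the GPU owns
		 * (0x3ff - 0 + 1) << 16 = 64MB of stolen memory at address 0 */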
1899 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
1900 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
1901	} else {
1902		rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
1903		/* Some production M6 boards will report 0
1904		 * if 8 MB is installed
1905		 */
1906 if (rdev->mc.real_vram_size == 0) {
1907 rdev->mc.real_vram_size = 8192 * 1024;
1908 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
1909		}
1910 /* let driver place VRAM */
1911 rdev->mc.vram_location = 0xFFFFFFFFUL;
1912 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
1913		 * Novell bug 204882, along with lots of Ubuntu ones */
1914 if (config_aper_size > rdev->mc.real_vram_size)
1915 rdev->mc.mc_vram_size = config_aper_size;
1916 else
1917 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
1918 }
1919
1920 /* work out accessible VRAM */
1921 accessible = r100_get_accessible_vram(rdev);
1922
1923 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
1924 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
1925
1926 if (accessible > rdev->mc.aper_size)
1927 accessible = rdev->mc.aper_size;
1928
1929 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
1930 rdev->mc.mc_vram_size = rdev->mc.aper_size;
1931
1932 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
1933 rdev->mc.real_vram_size = rdev->mc.aper_size;
1934}
1935
1936void r100_vram_info(struct radeon_device *rdev)
1937{
1938 r100_vram_get_type(rdev);
1939
1940 r100_vram_init_sizes(rdev);
1941}
1942
1943
1944/*
1945 * Indirect registers accessor
1946 */
1947void r100_pll_errata_after_index(struct radeon_device *rdev)
1948{
1949 if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
1950 return;
1951 }
1952 (void)RREG32(RADEON_CLOCK_CNTL_DATA);
1953 (void)RREG32(RADEON_CRTC_GEN_CNTL);
1954}
1955
1956static void r100_pll_errata_after_data(struct radeon_device *rdev)
1957{
1958	/* This workaround is necessary on RV100, RS100 and RS200 chips,
1959 * or the chip could hang on a subsequent access
1960 */
1961 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
1962 udelay(5000);
1963 }
1964
1965	/* This function is required to work around a hardware bug in some (all?)
1966 * revisions of the R300. This workaround should be called after every
1967 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
1968 * may not be correct.
1969 */
1970 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
1971 uint32_t save, tmp;
1972
1973 save = RREG32(RADEON_CLOCK_CNTL_INDEX);
1974 tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
1975 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
1976 tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
1977 WREG32(RADEON_CLOCK_CNTL_INDEX, save);
1978 }
1979}
1980
1981uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
1982{
1983 uint32_t data;
1984
1985 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
1986 r100_pll_errata_after_index(rdev);
1987 data = RREG32(RADEON_CLOCK_CNTL_DATA);
1988 r100_pll_errata_after_data(rdev);
1989 return data;
1990}
1991
1992void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1993{
1994 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
1995 r100_pll_errata_after_index(rdev);
1996 WREG32(RADEON_CLOCK_CNTL_DATA, v);
1997 r100_pll_errata_after_data(rdev);
1998}
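/* Illustrative only: the rest of the driver normally reaches this accessor
 * pair through the RREG32_PLL()/WREG32_PLL() wrappers rather than calling
 * the functions directly, e.g.
 *
 *	tmp = RREG32_PLL(RADEON_SCLK_CNTL);
 *	WREG32_PLL(RADEON_SCLK_CNTL, tmp | RADEON_SCLK_FORCE_CP);
 */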
1999
2000int r100_init(struct radeon_device *rdev)
2001{
2002 if (ASIC_IS_RN50(rdev)) {
2003 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
2004 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
2005 } else if (rdev->family < CHIP_R200) {
2006 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
2007 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
2008 } else {
2009 return r200_init(rdev);
2010 }
2011 return 0;
2012}
2013
2014/*
2015 * Debugfs info
2016 */
2017#if defined(CONFIG_DEBUG_FS)
2018static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
2019{
2020 struct drm_info_node *node = (struct drm_info_node *) m->private;
2021 struct drm_device *dev = node->minor->dev;
2022 struct radeon_device *rdev = dev->dev_private;
2023 uint32_t reg, value;
2024 unsigned i;
2025
2026 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
2027 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
2028 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2029 for (i = 0; i < 64; i++) {
2030 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
2031 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
2032 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
2033 value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
2034 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
2035 }
2036 return 0;
2037}
2038
2039static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2040{
2041 struct drm_info_node *node = (struct drm_info_node *) m->private;
2042 struct drm_device *dev = node->minor->dev;
2043 struct radeon_device *rdev = dev->dev_private;
2044 uint32_t rdp, wdp;
2045 unsigned count, i, j;
2046
2047 radeon_ring_free_size(rdev);
2048 rdp = RREG32(RADEON_CP_RB_RPTR);
2049 wdp = RREG32(RADEON_CP_RB_WPTR);
2050 count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
2051 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2052 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
2053 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2054 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
2055 seq_printf(m, "%u dwords in ring\n", count);
2056 for (j = 0; j <= count; j++) {
2057 i = (rdp + j) & rdev->cp.ptr_mask;
2058 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
2059 }
2060 return 0;
2061}
2062
2063
2064static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
2065{
2066 struct drm_info_node *node = (struct drm_info_node *) m->private;
2067 struct drm_device *dev = node->minor->dev;
2068 struct radeon_device *rdev = dev->dev_private;
2069 uint32_t csq_stat, csq2_stat, tmp;
2070 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
2071 unsigned i;
2072
2073 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2074 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
2075 csq_stat = RREG32(RADEON_CP_CSQ_STAT);
2076 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
2077 r_rptr = (csq_stat >> 0) & 0x3ff;
2078 r_wptr = (csq_stat >> 10) & 0x3ff;
2079 ib1_rptr = (csq_stat >> 20) & 0x3ff;
2080 ib1_wptr = (csq2_stat >> 0) & 0x3ff;
2081 ib2_rptr = (csq2_stat >> 10) & 0x3ff;
2082 ib2_wptr = (csq2_stat >> 20) & 0x3ff;
2083 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
2084 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
2085 seq_printf(m, "Ring rptr %u\n", r_rptr);
2086 seq_printf(m, "Ring wptr %u\n", r_wptr);
2087 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
2088 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
2089 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
2090 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
2091 /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
2092 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
2093 seq_printf(m, "Ring fifo:\n");
2094 for (i = 0; i < 256; i++) {
2095 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2096 tmp = RREG32(RADEON_CP_CSQ_DATA);
2097 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
2098 }
2099 seq_printf(m, "Indirect1 fifo:\n");
2100 for (i = 256; i <= 512; i++) {
2101 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2102 tmp = RREG32(RADEON_CP_CSQ_DATA);
2103 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
2104 }
2105 seq_printf(m, "Indirect2 fifo:\n");
2106	for (i = 640; i < ib2_wptr; i++) {
2107 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2108 tmp = RREG32(RADEON_CP_CSQ_DATA);
2109 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
2110 }
2111 return 0;
2112}
2113
2114static int r100_debugfs_mc_info(struct seq_file *m, void *data)
2115{
2116 struct drm_info_node *node = (struct drm_info_node *) m->private;
2117 struct drm_device *dev = node->minor->dev;
2118 struct radeon_device *rdev = dev->dev_private;
2119 uint32_t tmp;
2120
2121 tmp = RREG32(RADEON_CONFIG_MEMSIZE);
2122 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
2123 tmp = RREG32(RADEON_MC_FB_LOCATION);
2124 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
2125 tmp = RREG32(RADEON_BUS_CNTL);
2126 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
2127 tmp = RREG32(RADEON_MC_AGP_LOCATION);
2128 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
2129 tmp = RREG32(RADEON_AGP_BASE);
2130 seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
2131 tmp = RREG32(RADEON_HOST_PATH_CNTL);
2132 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
2133 tmp = RREG32(0x01D0);
2134 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
2135 tmp = RREG32(RADEON_AIC_LO_ADDR);
2136 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
2137 tmp = RREG32(RADEON_AIC_HI_ADDR);
2138 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
2139 tmp = RREG32(0x01E4);
2140 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
2141 return 0;
2142}
2143
2144static struct drm_info_list r100_debugfs_rbbm_list[] = {
2145 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
2146};
2147
2148static struct drm_info_list r100_debugfs_cp_list[] = {
2149 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
2150 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
2151};
2152
2153static struct drm_info_list r100_debugfs_mc_info_list[] = {
2154 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
2155};
2156#endif
2157
2158int r100_debugfs_rbbm_init(struct radeon_device *rdev)
2159{
2160#if defined(CONFIG_DEBUG_FS)
2161 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
2162#else
2163 return 0;
2164#endif
2165}
2166
2167int r100_debugfs_cp_init(struct radeon_device *rdev)
2168{
2169#if defined(CONFIG_DEBUG_FS)
2170 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
2171#else
2172 return 0;
2173#endif
2174}
2175
2176int r100_debugfs_mc_info_init(struct radeon_device *rdev)
2177{
2178#if defined(CONFIG_DEBUG_FS)
2179 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
2180#else
2181 return 0;
2182#endif
2183}
2184
2185int r100_set_surface_reg(struct radeon_device *rdev, int reg,
2186 uint32_t tiling_flags, uint32_t pitch,
2187 uint32_t offset, uint32_t obj_size)
2188{
2189 int surf_index = reg * 16;
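	/* each surface register block (INFO/LOWER_BOUND/UPPER_BOUND) is
	 * spaced 16 bytes apart, hence reg * 16 */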
2190 int flags = 0;
2191
2192 /* r100/r200 divide by 16 */
2193 if (rdev->family < CHIP_R300)
2194 flags = pitch / 16;
2195 else
2196 flags = pitch / 8;
2197
2198 if (rdev->family <= CHIP_RS200) {
2199 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2200 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2201 flags |= RADEON_SURF_TILE_COLOR_BOTH;
2202 if (tiling_flags & RADEON_TILING_MACRO)
2203 flags |= RADEON_SURF_TILE_COLOR_MACRO;
2204 } else if (rdev->family <= CHIP_RV280) {
2205 if (tiling_flags & (RADEON_TILING_MACRO))
2206 flags |= R200_SURF_TILE_COLOR_MACRO;
2207 if (tiling_flags & RADEON_TILING_MICRO)
2208 flags |= R200_SURF_TILE_COLOR_MICRO;
2209 } else {
2210 if (tiling_flags & RADEON_TILING_MACRO)
2211 flags |= R300_SURF_TILE_MACRO;
2212 if (tiling_flags & RADEON_TILING_MICRO)
2213 flags |= R300_SURF_TILE_MICRO;
2214 }
2215
2216 DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
2217 WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
2218 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
2219 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
2220 return 0;
2221}
2222
2223void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
2224{
2225 int surf_index = reg * 16;
2226 WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
2227}
2228
2229void r100_bandwidth_update(struct radeon_device *rdev)
2230{
2231 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
2232 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
2233 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
2234 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
2235 fixed20_12 memtcas_ff[8] = {
2236 fixed_init(1),
2237 fixed_init(2),
2238 fixed_init(3),
2239 fixed_init(0),
2240 fixed_init_half(1),
2241 fixed_init_half(2),
2242 fixed_init(0),
2243 };
2244 fixed20_12 memtcas_rs480_ff[8] = {
2245 fixed_init(0),
2246 fixed_init(1),
2247 fixed_init(2),
2248 fixed_init(3),
2249 fixed_init(0),
2250 fixed_init_half(1),
2251 fixed_init_half(2),
2252 fixed_init_half(3),
2253 };
2254 fixed20_12 memtcas2_ff[8] = {
2255 fixed_init(0),
2256 fixed_init(1),
2257 fixed_init(2),
2258 fixed_init(3),
2259 fixed_init(4),
2260 fixed_init(5),
2261 fixed_init(6),
2262 fixed_init(7),
2263 };
2264 fixed20_12 memtrbs[8] = {
2265 fixed_init(1),
2266 fixed_init_half(1),
2267 fixed_init(2),
2268 fixed_init_half(2),
2269 fixed_init(3),
2270 fixed_init_half(3),
2271 fixed_init(4),
2272 fixed_init_half(4)
2273 };
2274 fixed20_12 memtrbs_r4xx[8] = {
2275 fixed_init(4),
2276 fixed_init(5),
2277 fixed_init(6),
2278 fixed_init(7),
2279 fixed_init(8),
2280 fixed_init(9),
2281 fixed_init(10),
2282 fixed_init(11)
2283 };
2284 fixed20_12 min_mem_eff;
2285 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
2286 fixed20_12 cur_latency_mclk, cur_latency_sclk;
2287 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
2288 disp_drain_rate2, read_return_rate;
2289 fixed20_12 time_disp1_drop_priority;
2290 int c;
2291 int cur_size = 16; /* in octawords */
2292 int critical_point = 0, critical_point2;
2293/* uint32_t read_return_rate, time_disp1_drop_priority; */
2294 int stop_req, max_stop_req;
2295 struct drm_display_mode *mode1 = NULL;
2296 struct drm_display_mode *mode2 = NULL;
2297 uint32_t pixel_bytes1 = 0;
2298 uint32_t pixel_bytes2 = 0;
2299
2300 if (rdev->mode_info.crtcs[0]->base.enabled) {
2301 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
2302 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
2303 }
2304 if (rdev->mode_info.crtcs[1]->base.enabled) {
2305 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
2306 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
2307 }
2308
2309 min_mem_eff.full = rfixed_const_8(0);
2310 /* get modes */
2311 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
2312 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
2313 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
2314 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
2315 /* check crtc enables */
2316 if (mode2)
2317 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
2318 if (mode1)
2319 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
2320 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
2321 }
2322
2323 /*
2324	 * determine if there is enough bandwidth for the current mode
2325 */
2326 mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
2327 temp_ff.full = rfixed_const(100);
2328 mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
2329 sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
2330 sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
2331
2332 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
2333 temp_ff.full = rfixed_const(temp);
2334 mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
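	/* e.g. a 64-bit DDR interface with a 200MHz memory clock:
	 * temp = (64 / 8) * 2 = 16 bytes/clock, mem_bw = 200 * 16 = 3200MB/s */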
2335
2336 pix_clk.full = 0;
2337 pix_clk2.full = 0;
2338 peak_disp_bw.full = 0;
2339 if (mode1) {
2340 temp_ff.full = rfixed_const(1000);
2341 pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
2342 pix_clk.full = rfixed_div(pix_clk, temp_ff);
2343 temp_ff.full = rfixed_const(pixel_bytes1);
2344 peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
2345 }
2346 if (mode2) {
2347 temp_ff.full = rfixed_const(1000);
2348 pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
2349 pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
2350 temp_ff.full = rfixed_const(pixel_bytes2);
2351 peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
2352 }
2353
2354 mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
2355 if (peak_disp_bw.full >= mem_bw.full) {
2356		DRM_ERROR("You may not have enough display bandwidth for the current mode\n"
2357			  "If you see flickering, try lowering the resolution, refresh rate, or color depth\n");
2358 }
2359
2360 /* Get values from the EXT_MEM_CNTL register...converting its contents. */
2361 temp = RREG32(RADEON_MEM_TIMING_CNTL);
2362 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
2363 mem_trcd = ((temp >> 2) & 0x3) + 1;
2364 mem_trp = ((temp & 0x3)) + 1;
2365 mem_tras = ((temp & 0x70) >> 4) + 1;
2366 } else if (rdev->family == CHIP_R300 ||
2367 rdev->family == CHIP_R350) { /* r300, r350 */
2368 mem_trcd = (temp & 0x7) + 1;
2369 mem_trp = ((temp >> 8) & 0x7) + 1;
2370 mem_tras = ((temp >> 11) & 0xf) + 4;
2371 } else if (rdev->family == CHIP_RV350 ||
2372 rdev->family <= CHIP_RV380) {
2373 /* rv3x0 */
2374 mem_trcd = (temp & 0x7) + 3;
2375 mem_trp = ((temp >> 8) & 0x7) + 3;
2376 mem_tras = ((temp >> 11) & 0xf) + 6;
2377 } else if (rdev->family == CHIP_R420 ||
2378 rdev->family == CHIP_R423 ||
2379 rdev->family == CHIP_RV410) {
2380 /* r4xx */
2381 mem_trcd = (temp & 0xf) + 3;
2382 if (mem_trcd > 15)
2383 mem_trcd = 15;
2384 mem_trp = ((temp >> 8) & 0xf) + 3;
2385 if (mem_trp > 15)
2386 mem_trp = 15;
2387 mem_tras = ((temp >> 12) & 0x1f) + 6;
2388 if (mem_tras > 31)
2389 mem_tras = 31;
2390 } else { /* RV200, R200 */
2391 mem_trcd = (temp & 0x7) + 1;
2392 mem_trp = ((temp >> 8) & 0x7) + 1;
2393 mem_tras = ((temp >> 12) & 0xf) + 4;
2394 }
2395 /* convert to FF */
2396 trcd_ff.full = rfixed_const(mem_trcd);
2397 trp_ff.full = rfixed_const(mem_trp);
2398 tras_ff.full = rfixed_const(mem_tras);
2399
2400	/* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
2401 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
2402 data = (temp & (7 << 20)) >> 20;
2403 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
2404 if (rdev->family == CHIP_RS480) /* don't think rs400 */
2405 tcas_ff = memtcas_rs480_ff[data];
2406 else
2407 tcas_ff = memtcas_ff[data];
2408 } else
2409 tcas_ff = memtcas2_ff[data];
2410
2411 if (rdev->family == CHIP_RS400 ||
2412 rdev->family == CHIP_RS480) {
2413		/* extra CAS latency stored in bits 23-25, 0-4 clocks */
2414 data = (temp >> 23) & 0x7;
2415 if (data < 5)
2416 tcas_ff.full += rfixed_const(data);
2417 }
2418
2419 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
2420 /* on the R300, Tcas is included in Trbs.
2421 */
2422 temp = RREG32(RADEON_MEM_CNTL);
2423 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
2424 if (data == 1) {
2425 if (R300_MEM_USE_CD_CH_ONLY & temp) {
2426 temp = RREG32(R300_MC_IND_INDEX);
2427 temp &= ~R300_MC_IND_ADDR_MASK;
2428 temp |= R300_MC_READ_CNTL_CD_mcind;
2429 WREG32(R300_MC_IND_INDEX, temp);
2430 temp = RREG32(R300_MC_IND_DATA);
2431 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
2432 } else {
2433 temp = RREG32(R300_MC_READ_CNTL_AB);
2434 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2435 }
2436 } else {
2437 temp = RREG32(R300_MC_READ_CNTL_AB);
2438 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2439 }
2440 if (rdev->family == CHIP_RV410 ||
2441 rdev->family == CHIP_R420 ||
2442 rdev->family == CHIP_R423)
2443 trbs_ff = memtrbs_r4xx[data];
2444 else
2445 trbs_ff = memtrbs[data];
2446 tcas_ff.full += trbs_ff.full;
2447 }
2448
2449 sclk_eff_ff.full = sclk_ff.full;
2450
2451 if (rdev->flags & RADEON_IS_AGP) {
2452 fixed20_12 agpmode_ff;
2453 agpmode_ff.full = rfixed_const(radeon_agpmode);
2454 temp_ff.full = rfixed_const_666(16);
2455 sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
2456 }
2457 /* TODO PCIE lanes may affect this - agpmode == 16?? */
2458
2459 if (ASIC_IS_R300(rdev)) {
2460 sclk_delay_ff.full = rfixed_const(250);
2461 } else {
2462 if ((rdev->family == CHIP_RV100) ||
2463 rdev->flags & RADEON_IS_IGP) {
2464 if (rdev->mc.vram_is_ddr)
2465 sclk_delay_ff.full = rfixed_const(41);
2466 else
2467 sclk_delay_ff.full = rfixed_const(33);
2468 } else {
2469 if (rdev->mc.vram_width == 128)
2470 sclk_delay_ff.full = rfixed_const(57);
2471 else
2472 sclk_delay_ff.full = rfixed_const(41);
2473 }
2474 }
2475
2476 mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
2477
2478 if (rdev->mc.vram_is_ddr) {
2479 if (rdev->mc.vram_width == 32) {
2480 k1.full = rfixed_const(40);
2481 c = 3;
2482 } else {
2483 k1.full = rfixed_const(20);
2484 c = 1;
2485 }
2486 } else {
2487 k1.full = rfixed_const(40);
2488 c = 3;
2489 }
2490
2491 temp_ff.full = rfixed_const(2);
2492 mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
2493 temp_ff.full = rfixed_const(c);
2494 mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
2495 temp_ff.full = rfixed_const(4);
2496 mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
2497 mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
2498 mc_latency_mclk.full += k1.full;
2499
2500 mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
2501 mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
2502
2503 /*
2504 HW cursor time assuming worst case of full size colour cursor.
2505 */
2506 temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
2507 temp_ff.full += trcd_ff.full;
2508 if (temp_ff.full < tras_ff.full)
2509 temp_ff.full = tras_ff.full;
2510 cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
2511
2512 temp_ff.full = rfixed_const(cur_size);
2513 cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
2514 /*
2515 Find the total latency for the display data.
2516 */
2517 disp_latency_overhead.full = rfixed_const(80);
2518 disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
2519 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
2520 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
2521
2522 if (mc_latency_mclk.full > mc_latency_sclk.full)
2523 disp_latency.full = mc_latency_mclk.full;
2524 else
2525 disp_latency.full = mc_latency_sclk.full;
2526
2527 /* setup Max GRPH_STOP_REQ default value */
2528 if (ASIC_IS_RV100(rdev))
2529 max_stop_req = 0x5c;
2530 else
2531 max_stop_req = 0x7c;
2532
2533 if (mode1) {
2534 /* CRTC1
2535 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
2536 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
2537 */
2538 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
2539
2540 if (stop_req > max_stop_req)
2541 stop_req = max_stop_req;
2542
2543 /*
2544 Find the drain rate of the display buffer.
2545 */
2546 temp_ff.full = rfixed_const((16/pixel_bytes1));
2547 disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
2548
2549 /*
2550 Find the critical point of the display buffer.
2551 */
2552 crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
2553 crit_point_ff.full += rfixed_const_half(0);
2554
2555 critical_point = rfixed_trunc(crit_point_ff);
2556
2557 if (rdev->disp_priority == 2) {
2558 critical_point = 0;
2559 }
2560
2561 /*
2562 The critical point should never be above max_stop_req-4. Setting
2563 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
2564 */
2565 if (max_stop_req - critical_point < 4)
2566 critical_point = 0;
2567
2568 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
2569 /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
2570 critical_point = 0x10;
2571 }
2572
2573 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
2574 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
2575 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
2576 temp &= ~(RADEON_GRPH_START_REQ_MASK);
2577 if ((rdev->family == CHIP_R350) &&
2578 (stop_req > 0x15)) {
2579 stop_req -= 0x10;
2580 }
2581 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
2582 temp |= RADEON_GRPH_BUFFER_SIZE;
2583 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
2584 RADEON_GRPH_CRITICAL_AT_SOF |
2585 RADEON_GRPH_STOP_CNTL);
2586 /*
2587 Write the result into the register.
2588 */
2589 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
2590 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
2591
2592#if 0
2593 if ((rdev->family == CHIP_RS400) ||
2594 (rdev->family == CHIP_RS480)) {
2595 /* attempt to program RS400 disp regs correctly ??? */
2596 temp = RREG32(RS400_DISP1_REG_CNTL);
2597 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
2598 RS400_DISP1_STOP_REQ_LEVEL_MASK);
2599 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
2600 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
2601 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
2602 temp = RREG32(RS400_DMIF_MEM_CNTL1);
2603 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
2604 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
2605 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
2606 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
2607 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
2608 }
2609#endif
2610
2611		DRM_DEBUG("GRPH_BUFFER_CNTL to %x\n",
2612 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
2613 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
2614 }
2615
2616 if (mode2) {
2617 u32 grph2_cntl;
2618 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
2619
2620 if (stop_req > max_stop_req)
2621 stop_req = max_stop_req;
2622
2623 /*
2624 Find the drain rate of the display buffer.
2625 */
2626 temp_ff.full = rfixed_const((16/pixel_bytes2));
2627 disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
2628
2629 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
2630 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
2631 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
2632 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
2633 if ((rdev->family == CHIP_R350) &&
2634 (stop_req > 0x15)) {
2635 stop_req -= 0x10;
2636 }
2637 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
2638 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
2639 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
2640 RADEON_GRPH_CRITICAL_AT_SOF |
2641 RADEON_GRPH_STOP_CNTL);
2642
2643 if ((rdev->family == CHIP_RS100) ||
2644 (rdev->family == CHIP_RS200))
2645 critical_point2 = 0;
2646 else {
2647 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
2648 temp_ff.full = rfixed_const(temp);
2649 temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
2650 if (sclk_ff.full < temp_ff.full)
2651 temp_ff.full = sclk_ff.full;
2652
2653 read_return_rate.full = temp_ff.full;
2654
2655 if (mode1) {
2656 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
2657 time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
2658 } else {
2659 time_disp1_drop_priority.full = 0;
2660 }
2661 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
2662 crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
2663 crit_point_ff.full += rfixed_const_half(0);
2664
2665 critical_point2 = rfixed_trunc(crit_point_ff);
2666
2667 if (rdev->disp_priority == 2) {
2668 critical_point2 = 0;
2669 }
2670
2671 if (max_stop_req - critical_point2 < 4)
2672 critical_point2 = 0;
2673
2674 }
2675
2676 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
2677 /* some R300 cards have problem with this set to 0 */
2678 critical_point2 = 0x10;
2679 }
2680
2681 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
2682 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
2683
2684 if ((rdev->family == CHIP_RS400) ||
2685 (rdev->family == CHIP_RS480)) {
2686#if 0
2687 /* attempt to program RS400 disp2 regs correctly ??? */
2688 temp = RREG32(RS400_DISP2_REQ_CNTL1);
2689 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
2690 RS400_DISP2_STOP_REQ_LEVEL_MASK);
2691 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
2692 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
2693 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
2694 temp = RREG32(RS400_DISP2_REQ_CNTL2);
2695 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
2696 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
2697 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
2698 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
2699 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
2700#endif
2701 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
2702 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
2703 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
2704 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
2705 }
2706
2707		DRM_DEBUG("GRPH2_BUFFER_CNTL to %x\n",
2708 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
2709 }
2710}
2711
2712static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
2713{
2714 DRM_ERROR("pitch %d\n", t->pitch);
2715 DRM_ERROR("width %d\n", t->width);
2716 DRM_ERROR("height %d\n", t->height);
2717 DRM_ERROR("num levels %d\n", t->num_levels);
2718 DRM_ERROR("depth %d\n", t->txdepth);
2719 DRM_ERROR("bpp %d\n", t->cpp);
2720 DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
2721 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
2722 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
2723}
2724
2725static int r100_cs_track_cube(struct radeon_device *rdev,
2726 struct r100_cs_track *track, unsigned idx)
2727{
2728 unsigned face, w, h;
2729 struct radeon_object *cube_robj;
2730 unsigned long size;
2731
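	/* only 5 cube_info[] slots are checked here; the remaining face is
	 * presumably covered by the base texture check in
	 * r100_cs_track_texture_check() */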
2732 for (face = 0; face < 5; face++) {
2733 cube_robj = track->textures[idx].cube_info[face].robj;
2734 w = track->textures[idx].cube_info[face].width;
2735 h = track->textures[idx].cube_info[face].height;
2736
2737 size = w * h;
2738 size *= track->textures[idx].cpp;
2739
2740 size += track->textures[idx].cube_info[face].offset;
2741
2742 if (size > radeon_object_size(cube_robj)) {
2743 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
2744 size, radeon_object_size(cube_robj));
2745 r100_cs_track_texture_print(&track->textures[idx]);
2746 return -1;
2747 }
2748 }
2749 return 0;
2750}
2751
2752static int r100_cs_track_texture_check(struct radeon_device *rdev,
2753 struct r100_cs_track *track)
2754{
2755 struct radeon_object *robj;
2756 unsigned long size;
2757 unsigned u, i, w, h;
2758 int ret;
2759
2760 for (u = 0; u < track->num_texture; u++) {
2761 if (!track->textures[u].enabled)
2762 continue;
2763 robj = track->textures[u].robj;
2764 if (robj == NULL) {
2765 DRM_ERROR("No texture bound to unit %u\n", u);
2766 return -EINVAL;
2767 }
2768 size = 0;
2769 for (i = 0; i <= track->textures[u].num_levels; i++) {
2770 if (track->textures[u].use_pitch) {
2771 if (rdev->family < CHIP_R300)
2772 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
2773 else
2774 w = track->textures[u].pitch / (1 << i);
2775 } else {
2776 w = track->textures[u].width / (1 << i);
2777 if (rdev->family >= CHIP_RV515)
2778 w |= track->textures[u].width_11;
2779 if (track->textures[u].roundup_w)
2780 w = roundup_pow_of_two(w);
2781 }
2782 h = track->textures[u].height / (1 << i);
2783 if (rdev->family >= CHIP_RV515)
2784 h |= track->textures[u].height_11;
2785 if (track->textures[u].roundup_h)
2786 h = roundup_pow_of_two(h);
2787 size += w * h;
2788 }
2789 size *= track->textures[u].cpp;
2790 switch (track->textures[u].tex_coord_type) {
2791 case 0:
2792 break;
2793 case 1:
2794 size *= (1 << track->textures[u].txdepth);
2795 break;
2796 case 2:
2797 if (track->separate_cube) {
2798 ret = r100_cs_track_cube(rdev, track, u);
2799 if (ret)
2800 return ret;
2801 } else
2802 size *= 6;
2803 break;
2804 default:
2805 DRM_ERROR("Invalid texture coordinate type %u for unit "
2806 "%u\n", track->textures[u].tex_coord_type, u);
2807 return -EINVAL;
2808 }
2809 if (size > radeon_object_size(robj)) {
2810 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
2811 "%lu\n", u, size, radeon_object_size(robj));
2812 r100_cs_track_texture_print(&track->textures[u]);
2813 return -EINVAL;
2814 }
2815 }
2816 return 0;
2817}
2818
2819int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2820{
2821 unsigned i;
2822 unsigned long size;
2823 unsigned prim_walk;
2824 unsigned nverts;
2825
2826 for (i = 0; i < track->num_cb; i++) {
2827 if (track->cb[i].robj == NULL) {
2828 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
2829 return -EINVAL;
2830 }
2831 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
2832 size += track->cb[i].offset;
2833 if (size > radeon_object_size(track->cb[i].robj)) {
2834 DRM_ERROR("[drm] Buffer too small for color buffer %d "
2835 "(need %lu have %lu) !\n", i, size,
2836 radeon_object_size(track->cb[i].robj));
2837 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
2838 i, track->cb[i].pitch, track->cb[i].cpp,
2839 track->cb[i].offset, track->maxy);
2840 return -EINVAL;
2841 }
2842 }
2843 if (track->z_enabled) {
2844 if (track->zb.robj == NULL) {
2845 DRM_ERROR("[drm] No buffer for z buffer !\n");
2846 return -EINVAL;
2847 }
2848 size = track->zb.pitch * track->zb.cpp * track->maxy;
2849 size += track->zb.offset;
2850 if (size > radeon_object_size(track->zb.robj)) {
2851 DRM_ERROR("[drm] Buffer too small for z buffer "
2852 "(need %lu have %lu) !\n", size,
2853 radeon_object_size(track->zb.robj));
2854 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
2855 track->zb.pitch, track->zb.cpp,
2856 track->zb.offset, track->maxy);
2857 return -EINVAL;
2858 }
2859 }
2860 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
2861 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
2862 switch (prim_walk) {
2863 case 1:
2864 for (i = 0; i < track->num_arrays; i++) {
2865 size = track->arrays[i].esize * track->max_indx * 4;
2866 if (track->arrays[i].robj == NULL) {
2867 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2868 "bound\n", prim_walk, i);
2869 return -EINVAL;
2870 }
2871 if (size > radeon_object_size(track->arrays[i].robj)) {
2872 DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
2873 "have %lu dwords\n", prim_walk, i,
2874 size >> 2,
2875 radeon_object_size(track->arrays[i].robj) >> 2);
2876 DRM_ERROR("Max indices %u\n", track->max_indx);
2877 return -EINVAL;
2878 }
2879 }
2880 break;
2881 case 2:
2882 for (i = 0; i < track->num_arrays; i++) {
2883 size = track->arrays[i].esize * (nverts - 1) * 4;
2884 if (track->arrays[i].robj == NULL) {
2885 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2886 "bound\n", prim_walk, i);
2887 return -EINVAL;
2888 }
2889 if (size > radeon_object_size(track->arrays[i].robj)) {
2890 DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
2891 "have %lu dwords\n", prim_walk, i, size >> 2,
2892 radeon_object_size(track->arrays[i].robj) >> 2);
2893 return -EINVAL;
2894 }
2895 }
2896 break;
2897 case 3:
2898 size = track->vtx_size * nverts;
2899 if (size != track->immd_dwords) {
2900		DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
2901 track->immd_dwords, size);
2902 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
2903 nverts, track->vtx_size);
2904 return -EINVAL;
2905 }
2906 break;
2907 default:
2908 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
2909 prim_walk);
2910 return -EINVAL;
2911 }
2912 return r100_cs_track_texture_check(rdev, track);
2913}
2914
2915void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
2916{
2917 unsigned i, face;
2918
2919 if (rdev->family < CHIP_R300) {
2920 track->num_cb = 1;
2921 if (rdev->family <= CHIP_RS200)
2922 track->num_texture = 3;
2923 else
2924 track->num_texture = 6;
2925 track->maxy = 2048;
2926 track->separate_cube = 1;
2927 } else {
2928 track->num_cb = 4;
2929 track->num_texture = 16;
2930 track->maxy = 4096;
2931 track->separate_cube = 0;
2932 }
2933
2934 for (i = 0; i < track->num_cb; i++) {
2935 track->cb[i].robj = NULL;
2936 track->cb[i].pitch = 8192;
2937 track->cb[i].cpp = 16;
2938 track->cb[i].offset = 0;
2939 }
2940 track->z_enabled = true;
2941 track->zb.robj = NULL;
2942 track->zb.pitch = 8192;
2943 track->zb.cpp = 4;
2944 track->zb.offset = 0;
2945 track->vtx_size = 0x7F;
2946 track->immd_dwords = 0xFFFFFFFFUL;
2947 track->num_arrays = 11;
2948 track->max_indx = 0x00FFFFFFUL;
2949 for (i = 0; i < track->num_arrays; i++) {
2950 track->arrays[i].robj = NULL;
2951 track->arrays[i].esize = 0x7F;
2952 }
2953 for (i = 0; i < track->num_texture; i++) {
2954 track->textures[i].pitch = 16536;
2955 track->textures[i].width = 16536;
2956 track->textures[i].height = 16536;
2957 track->textures[i].width_11 = 1 << 11;
2958 track->textures[i].height_11 = 1 << 11;
2959 track->textures[i].num_levels = 12;
2960 if (rdev->family <= CHIP_RS200) {
2961 track->textures[i].tex_coord_type = 0;
2962 track->textures[i].txdepth = 0;
2963 } else {
2964 track->textures[i].txdepth = 16;
2965 track->textures[i].tex_coord_type = 1;
2966 }
2967 track->textures[i].cpp = 64;
2968 track->textures[i].robj = NULL;
2969 /* CS IB emission code makes sure texture unit are disabled */
2970 track->textures[i].enabled = false;
2971 track->textures[i].roundup_w = true;
2972 track->textures[i].roundup_h = true;
2973 if (track->separate_cube)
2974 for (face = 0; face < 5; face++) {
2975 track->textures[i].cube_info[face].robj = NULL;
2976 track->textures[i].cube_info[face].width = 16536;
2977 track->textures[i].cube_info[face].height = 16536;
2978 track->textures[i].cube_info[face].offset = 0;
2979 }
2980 }
2981}
2982
2983int r100_ring_test(struct radeon_device *rdev)
2984{
2985 uint32_t scratch;
2986 uint32_t tmp = 0;
2987 unsigned i;
2988 int r;
2989
2990 r = radeon_scratch_get(rdev, &scratch);
2991 if (r) {
2992 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2993 return r;
2994 }
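	/* seed the scratch register so a stale 0xDEADBEEF cannot fake a pass */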
2995 WREG32(scratch, 0xCAFEDEAD);
2996 r = radeon_ring_lock(rdev, 2);
2997 if (r) {
2998 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2999 radeon_scratch_free(rdev, scratch);
3000 return r;
3001 }
3002 radeon_ring_write(rdev, PACKET0(scratch, 0));
3003 radeon_ring_write(rdev, 0xDEADBEEF);
3004 radeon_ring_unlock_commit(rdev);
3005 for (i = 0; i < rdev->usec_timeout; i++) {
3006 tmp = RREG32(scratch);
3007 if (tmp == 0xDEADBEEF) {
3008 break;
3009 }
3010 DRM_UDELAY(1);
3011 }
3012 if (i < rdev->usec_timeout) {
3013		DRM_INFO("ring test succeeded in %u usecs\n", i);
3014 } else {
3015		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
3016 scratch, tmp);
3017 r = -EINVAL;
3018 }
3019 radeon_scratch_free(rdev, scratch);
3020 return r;
3021}
3022
3023void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3024{
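	/* PACKET0 with a count of 1 writes two consecutive registers:
	 * CP_IB_BASE takes the IB address, CP_IB_BUFSZ its dword length */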
3025 radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
3026 radeon_ring_write(rdev, ib->gpu_addr);
3027 radeon_ring_write(rdev, ib->length_dw);
3028}
3029
3030int r100_ib_test(struct radeon_device *rdev)
3031{
3032 struct radeon_ib *ib;
3033 uint32_t scratch;
3034 uint32_t tmp = 0;
3035 unsigned i;
3036 int r;
3037
3038 r = radeon_scratch_get(rdev, &scratch);
3039 if (r) {
3040 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3041 return r;
3042 }
3043 WREG32(scratch, 0xCAFEDEAD);
3044	r = radeon_ib_get(rdev, &ib);
3045	if (r) {
		radeon_scratch_free(rdev, scratch);	/* don't leak the scratch reg on error */
3046		return r;
3047	}
3048 ib->ptr[0] = PACKET0(scratch, 0);
3049 ib->ptr[1] = 0xDEADBEEF;
3050 ib->ptr[2] = PACKET2(0);
3051 ib->ptr[3] = PACKET2(0);
3052 ib->ptr[4] = PACKET2(0);
3053 ib->ptr[5] = PACKET2(0);
3054 ib->ptr[6] = PACKET2(0);
3055 ib->ptr[7] = PACKET2(0);
3056 ib->length_dw = 8;
3057 r = radeon_ib_schedule(rdev, ib);
3058 if (r) {
3059 radeon_scratch_free(rdev, scratch);
3060 radeon_ib_free(rdev, &ib);
3061 return r;
3062 }
3063	r = radeon_fence_wait(ib->fence, false);
3064	if (r) {
		radeon_scratch_free(rdev, scratch);	/* release resources on the error path too */
		radeon_ib_free(rdev, &ib);
3065		return r;
3066	}
3067 for (i = 0; i < rdev->usec_timeout; i++) {
3068 tmp = RREG32(scratch);
3069 if (tmp == 0xDEADBEEF) {
3070 break;
3071 }
3072 DRM_UDELAY(1);
3073 }
3074 if (i < rdev->usec_timeout) {
3075 DRM_INFO("ib test succeeded in %u usecs\n", i);
3076 } else {
3077		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3078 scratch, tmp);
3079 r = -EINVAL;
3080 }
3081 radeon_scratch_free(rdev, scratch);
3082 radeon_ib_free(rdev, &ib);
3083 return r;
3084}