/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "r100d.h"

#include <linux/firmware.h>
#include <linux/platform_device.h>

#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

/* Firmware Names */
#define FIRMWARE_R100 "radeon/R100_cp.bin"
#define FIRMWARE_R200 "radeon/R200_cp.bin"
#define FIRMWARE_R300 "radeon/R300_cp.bin"
#define FIRMWARE_R420 "radeon/R420_cp.bin"
#define FIRMWARE_RS690 "radeon/RS690_cp.bin"
#define FIRMWARE_RS600 "radeon/RS600_cp.bin"
#define FIRMWARE_R520 "radeon/R520_cp.bin"

MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
MODULE_FIRMWARE(FIRMWARE_R300);
MODULE_FIRMWARE(FIRMWARE_R420);
MODULE_FIRMWARE(FIRMWARE_RS690);
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);

#include "r100_track.h"

/* This file gathers functions specific to:
 * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
 *
 * Some of these functions might be used by newer ASICs.
 */
int r200_init(struct radeon_device *rdev);
void r100_hdp_reset(struct radeon_device *rdev);
void r100_gpu_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_mc_wait_for_idle(struct radeon_device *rdev);
void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);


/*
 * PCI GART
 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: can we do something here ? */
	/* The hw seems to cache only one entry, so we should discard that
	 * entry; otherwise, if the first GPU GART read hits it, the access
	 * could end up at the wrong address. */
}

int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	if (rdev->gart.table.ram.ptr == NULL) {
		rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
		r = radeon_gart_table_ram_alloc(rdev);
		if (r) {
			return r;
		}
	}
	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translate */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	WREG32(RADEON_AIC_HI_ADDR, tmp);
	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
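
/*
 * Illustrative example (editorial, not from the original source): with
 * a 32 MiB GTT at gtt_location = 0x10000000 and 4 KiB pages,
 * num_gpu_pages is 8192, so the table allocated above occupies
 * 8192 * 4 = 32 KiB of system RAM; AIC_LO_ADDR is programmed to
 * 0x10000000, AIC_HI_ADDR to 0x11FFFFFF, and a GPU access to
 * 0x10000000 + i * 4096 resolves through table entry i.
 */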

void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}

int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
	return 0;
}

int r100_gart_enable(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_AGP) {
		r100_pci_gart_disable(rdev);
		return 0;
	}
	return r100_pci_gart_enable(rdev);
}


/*
 * MC
 */
void r100_mc_disable_clients(struct radeon_device *rdev)
{
	uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;

	/* FIXME: is this function correct for rs100,rs200,rs300 ? */
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	/* stop display and memory access */
	ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
	WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);

	r100_gpu_wait_for_vsync(rdev);

	WREG32(RADEON_CRTC_GEN_CNTL,
	       (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
	       RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);

	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);

		r100_gpu_wait_for_vsync2(rdev);
		WREG32(RADEON_CRTC2_GEN_CNTL,
		       (crtc2_gen_cntl &
			~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
		       RADEON_CRTC2_DISP_REQ_EN_B);
	}

	udelay(500);
}

void r100_mc_setup(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
	}
	/* Write VRAM size in case we are limiting it */
	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
	/* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM:
	 * if the aperture is 64MB but we have 32MB VRAM,
	 * we report only 32MB VRAM but we have to set MC_FB_LOCATION
	 * to 64MB, otherwise the gpu accidentally dies */
	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32(RADEON_MC_FB_LOCATION, tmp);

	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);

	if (rdev->flags & RADEON_IS_AGP) {
		tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
		tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
		tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
		WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
	} else {
		WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(RADEON_AGP_BASE, 0);
	}

	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
	tmp |= (7 << 28);
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
}
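
/*
 * Worked example (illustrative): MC_FB_LOCATION encodes the
 * framebuffer range in 64 KiB units, START in the low 16 bits and TOP
 * in the upper 16 bits (per the REG_SET calls above). With
 * vram_location = 0 and mc_vram_size = 64 MiB, TOP is
 * (0x04000000 - 1) >> 16 = 0x03FF and START is 0, so the register is
 * written with 0x03FF0000.
 */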

int r100_mc_init(struct radeon_device *rdev)
{
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}

	r100_gpu_init(rdev);
	/* Disable gart, which also disables out-of-gart access */
	r100_pci_gart_disable(rdev);

	/* Setup GPU memory space */
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	r100_mc_disable_clients(rdev);
	if (r100_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	r100_mc_setup(rdev);
	return 0;
}

void r100_mc_fini(struct radeon_device *rdev)
{
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
	radeon_gart_fini(rdev);
}


/*
 * Interrupts
 */
int r100_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;

	if (rdev->irq.sw_int) {
		tmp |= RADEON_SW_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		tmp |= RADEON_CRTC_VBLANK_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		tmp |= RADEON_CRTC2_VBLANK_MASK;
	}
	WREG32(RADEON_GEN_INT_CNTL, tmp);
	return 0;
}

static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
	uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
		RADEON_CRTC2_VBLANK_STAT;

	if (irqs) {
		WREG32(RADEON_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

int r100_irq_process(struct radeon_device *rdev)
{
	uint32_t status;

	status = r100_irq_ack(rdev);
	if (!status) {
		return IRQ_NONE;
	}
	while (status) {
		/* SW interrupt */
		if (status & RADEON_SW_INT_TEST) {
			radeon_fence_process(rdev);
		}
		/* Vertical blank interrupts */
		if (status & RADEON_CRTC_VBLANK_STAT) {
			drm_handle_vblank(rdev->ddev, 0);
		}
		if (status & RADEON_CRTC2_VBLANK_STAT) {
			drm_handle_vblank(rdev->ddev, 1);
		}
		status = r100_irq_ack(rdev);
	}
	return IRQ_HANDLED;
}

u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(RADEON_CRTC_CRNT_FRAME);
	else
		return RREG32(RADEON_CRTC2_CRNT_FRAME);
}


/*
 * Fence emission
 */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are ib schedule and buffer move) */
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}


/*
 * Writeback
 */
int r100_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_object_create(rdev, NULL, 4096,
					 true,
					 RADEON_GEM_DOMAIN_GTT,
					 false, &rdev->wb.wb_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_pin(rdev->wb.wb_obj,
				      RADEON_GEM_DOMAIN_GTT,
				      &rdev->wb.gpu_addr);
		if (r) {
			DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		if (r) {
			DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
			return r;
		}
	}
	WREG32(RADEON_SCRATCH_ADDR, rdev->wb.gpu_addr);
	WREG32(RADEON_CP_RB_RPTR_ADDR, rdev->wb.gpu_addr + 1024);
	WREG32(RADEON_SCRATCH_UMSK, 0xff);
	return 0;
}
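
/*
 * Editorial note: the single 4 KiB object above serves two consumers.
 * SCRATCH_ADDR points scratch-register writeback at its start,
 * SCRATCH_UMSK 0xff unmasks that writeback (one bit per scratch
 * register), and the ring read-pointer copy is placed 1024 bytes in,
 * so the two regions cannot overlap.
 */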

void r100_wb_fini(struct radeon_device *rdev)
{
	if (rdev->wb.wb_obj) {
		radeon_object_kunmap(rdev->wb.wb_obj);
		radeon_object_unpin(rdev->wb.wb_obj);
		radeon_object_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_pages,
		   struct radeon_fence *fence)
{
	uint32_t cur_pages;
	uint32_t stride_bytes = PAGE_SIZE;
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* radeon limited to 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is /64 */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
	num_loops = DIV_ROUND_UP(num_pages, 8191);

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
	r = radeon_ring_lock(rdev, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return -EINVAL;
	}
	while (num_pages > 0) {
		cur_pages = num_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_pages -= cur_pages;

		/* pages are in the Y direction - height;
		 * page width is the X direction - width */
		radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(rdev,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, 0);
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
	}
	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}
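
/*
 * Worked example (illustrative): with 4 KiB pages, stride_bytes stays
 * 4096 after the 16 KiB mask, so pitch = 4096 / 64 = 64 and
 * stride_pixels = 4096 / 4 = 1024. Copying 10000 pages takes
 * DIV_ROUND_UP(10000, 8191) = 2 blit loops, hence a ring reservation
 * of ndw = 64 + 10 * 2 = 84 dwords.
 */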


/*
 * CP
 */
void r100_ring_start(struct radeon_device *rdev)
{
	int r;

	r = radeon_ring_lock(rdev, 2);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev);
}


/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *fw_name = NULL;
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}
	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		fw_name = FIRMWARE_R100;
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		fw_name = FIRMWARE_R200;
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		fw_name = FIRMWARE_R300;
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		fw_name = FIRMWARE_R420;
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		fw_name = FIRMWARE_RS690;
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		fw_name = FIRMWARE_RS600;
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		fw_name = FIRMWARE_R520;
	}

	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	platform_device_unregister(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
		       fw_name);
	} else if (rdev->me_fw->size % 8) {
		printk(KERN_ERR
		       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
	}
	return err;
}

static void r100_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i, size;

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	if (rdev->me_fw) {
		size = rdev->me_fw->size / 4;
		fw_data = (const __be32 *)&rdev->me_fw->data[0];
		WREG32(RADEON_CP_ME_RAM_ADDR, 0);
		for (i = 0; i < size; i += 2) {
			WREG32(RADEON_CP_ME_RAM_DATAH,
			       be32_to_cpup(&fw_data[i]));
			WREG32(RADEON_CP_ME_RAM_DATAL,
			       be32_to_cpup(&fw_data[i + 1]));
		}
	}
}

int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
	unsigned rb_bufsz;
	unsigned rb_blksz;
	unsigned max_fetch;
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;
	uint32_t tmp;
	int r;

	if (r100_debugfs_cp_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for CP !\n");
	}
	/* Reset CP */
	tmp = RREG32(RADEON_CP_CSQ_STAT);
	if ((tmp & (1 << 31))) {
		DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
		mdelay(2);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
		mdelay(2);
		tmp = RREG32(RADEON_CP_CSQ_STAT);
		if ((tmp & (1 << 31))) {
			DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
		}
	} else {
		DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
	}

	if (!rdev->me_fw) {
		r = r100_cp_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring_size);
	if (r) {
		return r;
	}
	/* Each time the cp reads 1024 bytes (16 dword/quadword), update
	 * the rptr copy in system ram */
	rb_blksz = 9;
	/* cp will read 128 bytes at a time (4 dwords) */
	max_fetch = 1;
	rdev->cp.align_mask = 16 - 1;
	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force a CP_RB_WPTR write if it is written more than once before
	 * the delay expires
	 */
	pre_write_limit = 0;
	/* Setup the cp cache like this (cache size is 96 dwords):
	 *	RING		0  to 15
	 *	INDIRECT1	16 to 79
	 *	INDIRECT2	80 to 95
	 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * The idea is that most GPU commands go through the indirect1
	 * buffer, so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
	/* cp setup */
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	WREG32(RADEON_CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       RADEON_BUF_SWAP_32BIT |
#endif
	       REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch) |
	       RADEON_RB_NO_UPDATE);
	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
	WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
	/* Force read & write ptr to 0 */
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	udelay(10);
	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
	/* Set cp mode to bus mastering & enable cp */
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(0x718, 0);
	WREG32(0x744, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
	radeon_ring_start(rdev);
	r = radeon_ring_test(rdev);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
		return r;
	}
	rdev->cp.ready = true;
	return 0;
}
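
/*
 * Worked example (illustrative, assuming drm_order() returns the
 * base-2 log rounded up): asking for a 1 MiB ring gives
 * rb_bufsz = drm_order(1048576 / 8) = 17, and the ring is re-aligned
 * to (1 << 18) * 4 = 1 MiB; a 1.5 MiB request would round up to the
 * next power of two, 2 MiB.
 */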

void r100_cp_fini(struct radeon_device *rdev)
{
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	radeon_ring_fini(rdev);
	DRM_INFO("radeon: cp finalized\n");
}

void r100_cp_disable(struct radeon_device *rdev)
{
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}

int r100_cp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
	(void)RREG32(RADEON_RBBM_SOFT_RESET);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 16))) {
			DRM_INFO("CP reset succeeded (RBBM_STATUS=0x%08X)\n",
				 tmp);
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}

void r100_cp_commit(struct radeon_device *rdev)
{
	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(RADEON_CP_RB_WPTR);
}


/*
 * CS functions
 */
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}
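
/*
 * Worked example (illustrative): each dword of the bitmap covers 32
 * registers, i.e. 128 bytes of register space. For reg = 0x1720,
 * j = 0x1720 >> 7 = 46 and m = 1 << ((0x1720 >> 2) & 31) = 1 << 8,
 * so the write is allowed through only if bit 8 of auth[46] is set.
 */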

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
	}
}

/**
 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
 * packet is bigger than the remaining ib size, or if the packet is unknown.
 **/
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = ib_chunk->kdata[idx];
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
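
/*
 * Worked example (illustrative, assuming the usual CP header layout:
 * type in bits 31:30, count in bits 29:16, register dword offset in
 * bits 12:0): header 0x000205C8 decodes to type 0 (PACKET0), count 2
 * and reg 0x5C8 << 2 = 0x1720, i.e. a write to three consecutive
 * registers starting at 0x1720 when one_reg_wr is clear.
 */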

/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser:	parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ib_chunk;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, waitreloc;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg;

	ib_chunk = &p->chunks[p->chunk_ib_idx];

	/* parse the wait until */
	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
	if (r)
		return r;

	/* check it's a wait until and only 1 count */
	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
	    waitreloc.count != 0) {
		DRM_ERROR("vline wait had illegal wait until segment\n");
		r = -EINVAL;
		return r;
	}

	if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) {
		DRM_ERROR("vline wait had illegal wait until\n");
		r = -EINVAL;
		return r;
	}

	/* jump over the NOP */
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += waitreloc.count;
	p->idx += p3reloc.count;

	header = ib_chunk->kdata[h_idx];
	crtc_id = ib_chunk->kdata[h_idx + 5];
	reg = ib_chunk->kdata[h_idx] >> 2;
	mutex_lock(&p->rdev->ddev->mode_config.mutex);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		r = -EINVAL;
		goto out;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the wait until */
		ib_chunk->kdata[h_idx + 2] = PACKET2(0);
		ib_chunk->kdata[h_idx + 3] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= R300_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		case RADEON_CRTC_GUI_TRIG_VLINE:
			header &= R300_CP_PACKET0_REG_MASK;
			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			r = -EINVAL;
			goto out;
		}
		ib_chunk->kdata[h_idx] = header;
		ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
	}
out:
	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
	return r;
}
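
/*
 * Illustrative layout of the sequence handled above, derived from the
 * offsets the code uses: kdata[h_idx] is the VLINE_START_END PACKET0
 * header, [h_idx + 1] its value, [h_idx + 2] the WAIT_UNTIL PACKET0
 * header, [h_idx + 3] the RADEON_WAIT_CRTC_VLINE value, [h_idx + 4]
 * the relocation NOP header and [h_idx + 5] the crtc_id.
 */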

/**
 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @parser:		parser structure holding parsing context.
 * @data:		pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:		reloc information
 *
 * Checks that the next packet is a relocation packet3, does bo validation
 * and computes the GPU offset using the provided start.
 **/
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = ib_chunk->kdata[p3reloc.idx + 1];
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

static int r100_get_vtx_size(uint32_t vtx_fmt)
{
	int vtx_size;
	vtx_size = 2;
	/* ordered according to bits in spec */
	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
		vtx_size++;
	/* blend weight */
	if (vtx_fmt & (0x7 << 15))
		vtx_size += (vtx_fmt >> 15) & 0x7;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
		vtx_size++;
	return vtx_size;
}
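
/*
 * Worked example (illustrative): starting from the base size of 2
 * dwords, a format with RADEON_SE_VTX_FMT_N0 (3 dwords for the normal)
 * and RADEON_SE_VTX_FMT_ST0 (2 dwords for one texture coordinate set)
 * yields a vertex size of 2 + 3 + 2 = 7 dwords.
 */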

static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp;
	int r;
	int i, face;
	u32 tile_flags = 0;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	track = (struct r100_cs_track *)p->track;

	switch (reg) {
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
		/* FIXME: only allow PACKET3 blit? easier to check for out of
		 * range access */
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case RADEON_RB3D_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_RB3D_COLOROFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[0].robj = reloc->robj;
		track->cb[0].offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_TXOFFSET_0:
	case RADEON_PP_TXOFFSET_1:
	case RADEON_PP_TXOFFSET_2:
		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T0_0:
	case RADEON_PP_CUBIC_OFFSET_T0_1:
	case RADEON_PP_CUBIC_OFFSET_T0_2:
	case RADEON_PP_CUBIC_OFFSET_T0_3:
	case RADEON_PP_CUBIC_OFFSET_T0_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		track->textures[0].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T1_0:
	case RADEON_PP_CUBIC_OFFSET_T1_1:
	case RADEON_PP_CUBIC_OFFSET_T1_2:
	case RADEON_PP_CUBIC_OFFSET_T1_3:
	case RADEON_PP_CUBIC_OFFSET_T1_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		track->textures[1].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T2_0:
	case RADEON_PP_CUBIC_OFFSET_T2_1:
	case RADEON_PP_CUBIC_OFFSET_T2_2:
	case RADEON_PP_CUBIC_OFFSET_T2_3:
	case RADEON_PP_CUBIC_OFFSET_T2_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		track->textures[2].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_RE_WIDTH_HEIGHT:
		track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF);
		break;
	case RADEON_RB3D_COLORPITCH:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= RADEON_COLOR_TILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK;
		break;
	case RADEON_RB3D_DEPTHPITCH:
		track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK;
		break;
	case RADEON_RB3D_CNTL:
		switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
		case 7:
		case 8:
		case 9:
		case 11:
		case 12:
			track->cb[0].cpp = 1;
			break;
		case 3:
		case 4:
		case 15:
			track->cb[0].cpp = 2;
			break;
		case 6:
			track->cb[0].cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
			return -EINVAL;
		}
		track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE);
		break;
	case RADEON_RB3D_ZSTENCILCNTL:
		switch (ib_chunk->kdata[idx] & 0xf) {
		case 0:
			track->zb.cpp = 2;
			break;
		case 2:
		case 3:
		case 4:
		case 5:
		case 9:
		case 11:
			track->zb.cpp = 4;
			break;
		default:
			break;
		}
		break;
	case RADEON_RB3D_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_CNTL:
		{
			uint32_t temp = ib_chunk->kdata[idx] >> 4;
			for (i = 0; i < track->num_texture; i++)
				track->textures[i].enabled = !!(temp & (1 << i));
		}
		break;
	case RADEON_SE_VF_CNTL:
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		break;
	case RADEON_SE_VTX_FMT:
		track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]);
		break;
	case RADEON_PP_TEX_SIZE_0:
	case RADEON_PP_TEX_SIZE_1:
	case RADEON_PP_TEX_SIZE_2:
		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
		track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1;
		track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
		break;
	case RADEON_PP_TEX_PITCH_0:
	case RADEON_PP_TEX_PITCH_1:
	case RADEON_PP_TEX_PITCH_2:
		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
		track->textures[i].pitch = ib_chunk->kdata[idx] + 32;
		break;
	case RADEON_PP_TXFILTER_0:
	case RADEON_PP_TXFILTER_1:
	case RADEON_PP_TXFILTER_2:
		i = (reg - RADEON_PP_TXFILTER_0) / 24;
		track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK)
						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
		tmp = (ib_chunk->kdata[idx] >> 23) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_w = false;
		tmp = (ib_chunk->kdata[idx] >> 27) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_h = false;
		break;
	case RADEON_PP_TXFORMAT_0:
	case RADEON_PP_TXFORMAT_1:
	case RADEON_PP_TXFORMAT_2:
		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
		if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) {
			track->textures[i].use_pitch = 1;
		} else {
			track->textures[i].use_pitch = 0;
			track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
			track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
		}
		if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
			track->textures[i].tex_coord_type = 2;
		switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) {
		case RADEON_TXFORMAT_I8:
		case RADEON_TXFORMAT_RGB332:
		case RADEON_TXFORMAT_Y8:
			track->textures[i].cpp = 1;
			break;
		case RADEON_TXFORMAT_AI88:
		case RADEON_TXFORMAT_ARGB1555:
		case RADEON_TXFORMAT_RGB565:
		case RADEON_TXFORMAT_ARGB4444:
		case RADEON_TXFORMAT_VYUY422:
		case RADEON_TXFORMAT_YVYU422:
		case RADEON_TXFORMAT_DXT1:
		case RADEON_TXFORMAT_SHADOW16:
		case RADEON_TXFORMAT_LDUDV655:
		case RADEON_TXFORMAT_DUDV88:
			track->textures[i].cpp = 2;
			break;
		case RADEON_TXFORMAT_ARGB8888:
		case RADEON_TXFORMAT_RGBA8888:
		case RADEON_TXFORMAT_DXT23:
		case RADEON_TXFORMAT_DXT45:
		case RADEON_TXFORMAT_SHADOW32:
		case RADEON_TXFORMAT_LDUDUV8888:
			track->textures[i].cpp = 4;
			break;
		}
		track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf);
		track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf);
		break;
	case RADEON_PP_CUBIC_FACES_0:
	case RADEON_PP_CUBIC_FACES_1:
	case RADEON_PP_CUBIC_FACES_2:
		tmp = ib_chunk->kdata[idx];
		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
		for (face = 0; face < 4; face++) {
			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					 struct radeon_cs_packet *pkt,
					 struct radeon_object *robj)
{
	struct radeon_cs_chunk *ib_chunk;
	unsigned idx;

	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) {
		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
			  "(need %u have %lu) !\n",
			  ib_chunk->kdata[idx+2] + 1,
			  radeon_object_size(robj));
		return -EINVAL;
	}
	return 0;
}

static int r100_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	unsigned idx;
	unsigned i, c;
	volatile uint32_t *ib;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		c = ib_chunk->kdata[idx++];
		track->num_arrays = c;
		for (i = 0; i < (c - 1); i += 2, idx += 3) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 0].robj = reloc->robj;
			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
			track->arrays[i + 0].esize &= 0x7F;
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 1].robj = reloc->robj;
			track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
			track->arrays[i + 1].esize &= 0x7F;
		}
		if (c & 1) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 0].robj = reloc->robj;
			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
			track->arrays[i + 0].esize &= 0x7F;
		}
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	case 0x23:
		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		track->num_arrays = 1;
		track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]);

		track->arrays[0].robj = reloc->robj;
		track->arrays[0].esize = track->vtx_size;

		track->max_indx = ib_chunk->kdata[idx+1];

		track->vap_vf_cntl = ib_chunk->kdata[idx+3];
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
	case PACKET3_3D_DRAW_IMMD:
		if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = ib_chunk->kdata[idx+1];
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_IMMD_2:
		if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r100_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track track;
	int r;

	r100_cs_track_clear(p->rdev, &track);
	p->track = &track;
	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			if (p->rdev->family >= CHIP_R200)
				r = r100_cs_parse_packet0(p, &pkt,
							  p->rdev->config.r100.reg_safe_bm,
							  p->rdev->config.r100.reg_safe_bm_size,
							  &r200_packet0_check);
			else
				r = r100_cs_parse_packet0(p, &pkt,
							  p->rdev->config.r100.reg_safe_bm,
							  p->rdev->config.r100.reg_safe_bm_size,
							  &r100_packet0_check);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r100_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n",
				  pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}


/*
 * Global GPU functions
 */
void r100_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
	}

	if (rdev->family == CHIP_RV100 ||
	    rdev->family == CHIP_RS100 ||
	    rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
	}
}

/* Wait for vertical sync on primary CRTC */
void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
{
	uint32_t crtc_gen_cntl, tmp;
	int i;

	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
	    !(crtc_gen_cntl & RADEON_CRTC_EN)) {
		return;
	}
	/* Clear the CRTC_VBLANK_SAVE bit */
	WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_CRTC_STATUS);
		if (tmp & RADEON_CRTC_VBLANK_SAVE) {
			return;
		}
		DRM_UDELAY(1);
	}
}

/* Wait for vertical sync on secondary CRTC */
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
{
	uint32_t crtc2_gen_cntl, tmp;
	int i;

	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
	if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
	    !(crtc2_gen_cntl & RADEON_CRTC2_EN))
		return;

	/* Clear the CRTC_VBLANK_SAVE bit */
	WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_CRTC2_STATUS);
		if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
			return;
		}
		DRM_UDELAY(1);
	}
}

int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
		if (tmp >= n) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

int r100_gui_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
		printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
		       " Bad things might happen.\n");
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 31))) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

int r100_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(0x0150);
		if (tmp & (1 << 2)) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void r100_gpu_init(struct radeon_device *rdev)
{
	/* TODO: anything to do here ? pipes ? */
	r100_hdp_reset(rdev);
}

void r100_hdp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
	tmp |= (7 << 28);
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
}

int r100_rb2d_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
	(void)RREG32(RADEON_RBBM_SOFT_RESET);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 26))) {
			DRM_INFO("RB2D reset succeeded (RBBM_STATUS=0x%08X)\n",
				 tmp);
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}

int r100_gpu_reset(struct radeon_device *rdev)
{
	uint32_t status;

	/* reset order likely matters */
	status = RREG32(RADEON_RBBM_STATUS);
	/* reset HDP */
	r100_hdp_reset(rdev);
	/* reset rb2d */
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
		r100_rb2d_reset(rdev);
	}
	/* TODO: reset 3D engine */
	/* reset CP */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 16)) {
		r100_cp_reset(rdev);
	}
	/* Check if GPU is idle */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 31)) {
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
		return -1;
	}
	DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
	return 0;
}


/*
 * VRAM info
 */
static void r100_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;

	rdev->mc.vram_is_ddr = false;
	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.vram_is_ddr = true;
	else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
		rdev->mc.vram_is_ddr = true;
	if ((rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		tmp = RREG32(RADEON_MEM_CNTL);
		if (tmp & RV100_HALF_MODE) {
			rdev->mc.vram_width = 32;
		} else {
			rdev->mc.vram_width = 64;
		}
		if (rdev->flags & RADEON_SINGLE_CRTC) {
			rdev->mc.vram_width /= 4;
			rdev->mc.vram_is_ddr = true;
		}
	} else if (rdev->family <= CHIP_RV280) {
		tmp = RREG32(RADEON_MEM_CNTL);
		if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
			rdev->mc.vram_width = 128;
		} else {
			rdev->mc.vram_width = 64;
		}
	} else {
		/* newer IGPs */
		rdev->mc.vram_width = 128;
	}
}
1829
2a0f8918 1830static u32 r100_get_accessible_vram(struct radeon_device *rdev)
771fe6b9 1831{
2a0f8918
DA
1832 u32 aper_size;
1833 u8 byte;
1834
1835 aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
1836
1837 /* Set HDP_APER_CNTL only on cards that are known not to be broken,
1838 * that is has the 2nd generation multifunction PCI interface
1839 */
1840 if (rdev->family == CHIP_RV280 ||
1841 rdev->family >= CHIP_RV350) {
1842 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
1843 ~RADEON_HDP_APER_CNTL);
1844 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
1845 return aper_size * 2;
1846 }
1847
1848 /* Older cards have all sorts of funny issues to deal with. First
1849 * check if it's a multifunction card by reading the PCI config
1850 * header type... Limit those to one aperture size
1851 */
1852 pci_read_config_byte(rdev->pdev, 0xe, &byte);
1853 if (byte & 0x80) {
1854 DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
1855 DRM_INFO("Limiting VRAM to one aperture\n");
1856 return aper_size;
1857 }
1858
1859	/* Single function older card. We read HDP_APER_CNTL to see how the BIOS
1860	 * has set it up. We don't write it, as that is broken on some ASICs, and
1861	 * instead expect the BIOS to have done the right thing (might be too optimistic...)
1862	 */
1863 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
1864 return aper_size * 2;
1865 return aper_size;
1866}
1867
1868void r100_vram_init_sizes(struct radeon_device *rdev)
1869{
1870 u64 config_aper_size;
1871 u32 accessible;
1872
1873 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
1874
1875 if (rdev->flags & RADEON_IS_IGP) {
1876 uint32_t tom;
1877 /* read NB_TOM to get the amount of ram stolen for the GPU */
1878 tom = RREG32(RADEON_NB_TOM);
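	/* NB_TOM appears to encode the stolen range in 64KB units:
	 * bits 15:0 hold the bottom and bits 31:16 the top, so the size
	 * below is (top - bottom + 1) * 64KB and the base is bottom * 64KB.
	 */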
1879		rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
1880 /* for IGPs we need to keep VRAM where it was put by the BIOS */
1881 rdev->mc.vram_location = (tom & 0xffff) << 16;
1882 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
1883 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
1884	} else {
1885		rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
1886		/* Some production boards of m6 will report 0
1887		 * if they have 8 MB of VRAM
1888		 */
1889 if (rdev->mc.real_vram_size == 0) {
1890 rdev->mc.real_vram_size = 8192 * 1024;
1891 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
1892		}
1893 /* let driver place VRAM */
1894 rdev->mc.vram_location = 0xFFFFFFFFUL;
1895		/* Fix for RN50, M6, M7 with 8/16/32(??) MB of VRAM -
1896		 * Novell bug 204882, plus many Ubuntu reports */
1897 if (config_aper_size > rdev->mc.real_vram_size)
1898 rdev->mc.mc_vram_size = config_aper_size;
1899 else
1900 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
1901 }
1902
1903 /* work out accessible VRAM */
1904 accessible = r100_get_accessible_vram(rdev);
1905
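	/* From here on everything is clamped to what the CPU can actually
	 * reach through the PCI aperture: accessible VRAM, the MC VRAM
	 * size and the real VRAM size are all limited to aper_size below.
	 */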
1906 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
1907 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
1908
1909 if (accessible > rdev->mc.aper_size)
1910 accessible = rdev->mc.aper_size;
1911
1912 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
1913 rdev->mc.mc_vram_size = rdev->mc.aper_size;
1914
1915 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
1916 rdev->mc.real_vram_size = rdev->mc.aper_size;
1917}
1918
1919void r100_vram_info(struct radeon_device *rdev)
1920{
1921 r100_vram_get_type(rdev);
1922
1923 r100_vram_init_sizes(rdev);
1924}
1925
1926
1927/*
1928 * Indirect registers accessor
1929 */
1930void r100_pll_errata_after_index(struct radeon_device *rdev)
1931{
1932 if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
1933 return;
1934 }
1935 (void)RREG32(RADEON_CLOCK_CNTL_DATA);
1936 (void)RREG32(RADEON_CRTC_GEN_CNTL);
1937}
1938
1939static void r100_pll_errata_after_data(struct radeon_device *rdev)
1940{
1941	/* This workaround is necessary on RV100, RS100 and RS200 chips,
1942	 * or the chip could hang on a subsequent access
1943	 */
1944 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
1945 udelay(5000);
1946 }
1947
1948	/* This function is required to work around a hardware bug in some (all?)
1949 * revisions of the R300. This workaround should be called after every
1950 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
1951 * may not be correct.
1952 */
1953 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
1954 uint32_t save, tmp;
1955
1956 save = RREG32(RADEON_CLOCK_CNTL_INDEX);
1957 tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
1958 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
1959 tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
1960 WREG32(RADEON_CLOCK_CNTL_INDEX, save);
1961 }
1962}
1963
1964uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
1965{
1966 uint32_t data;
1967
1968 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
1969 r100_pll_errata_after_index(rdev);
1970 data = RREG32(RADEON_CLOCK_CNTL_DATA);
1971 r100_pll_errata_after_data(rdev);
1972 return data;
1973}
1974
1975void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1976{
1977 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
1978 r100_pll_errata_after_index(rdev);
1979 WREG32(RADEON_CLOCK_CNTL_DATA, v);
1980 r100_pll_errata_after_data(rdev);
1981}
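/*
 * Usage sketch for the accessors above (illustrative only): a typical
 * read-modify-write of a PLL register goes through the
 * CLOCK_CNTL_INDEX/CLOCK_CNTL_DATA pair, e.g.:
 *
 *	tmp = r100_pll_rreg(rdev, RADEON_PPLL_CNTL);
 *	tmp &= ~RADEON_PPLL_RESET;
 *	r100_pll_wreg(rdev, RADEON_PPLL_CNTL, tmp);
 *
 * Only the low 6 bits of the index are valid, and writes must assert
 * RADEON_PLL_WR_EN in the index byte, which r100_pll_wreg does.
 */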
1982
1983int r100_init(struct radeon_device *rdev)
1984{
1985 if (ASIC_IS_RN50(rdev)) {
1986 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
1987 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
1988 } else if (rdev->family < CHIP_R200) {
1989 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
1990 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
1991 } else {
1992 return r200_init(rdev);
1993 }
1994 return 0;
1995}
1996
1997/*
1998 * Debugfs info
1999 */
2000#if defined(CONFIG_DEBUG_FS)
2001static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
2002{
2003 struct drm_info_node *node = (struct drm_info_node *) m->private;
2004 struct drm_device *dev = node->minor->dev;
2005 struct radeon_device *rdev = dev->dev_private;
2006 uint32_t reg, value;
2007 unsigned i;
2008
2009 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
2010 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
2011 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
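	/* Dump the RBBM command fifo: writing the slot index with bit 8
	 * (0x100) set and then with it clear appears to select two
	 * different readback views, roughly the decoded register offset
	 * versus the raw data (inferred from the code, not documented).
	 */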
2012 for (i = 0; i < 64; i++) {
2013 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
2014 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
2015 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
2016 value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
2017 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
2018 }
2019 return 0;
2020}
2021
2022static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2023{
2024 struct drm_info_node *node = (struct drm_info_node *) m->private;
2025 struct drm_device *dev = node->minor->dev;
2026 struct radeon_device *rdev = dev->dev_private;
2027 uint32_t rdp, wdp;
2028 unsigned count, i, j;
2029
2030 radeon_ring_free_size(rdev);
2031 rdp = RREG32(RADEON_CP_RB_RPTR);
2032 wdp = RREG32(RADEON_CP_RB_WPTR);
2033 count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
2034 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2035 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
2036 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2037 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
2038 seq_printf(m, "%u dwords in ring\n", count);
2039 for (j = 0; j <= count; j++) {
2040 i = (rdp + j) & rdev->cp.ptr_mask;
2041 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
2042 }
2043 return 0;
2044}
2045
2046
2047static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
2048{
2049 struct drm_info_node *node = (struct drm_info_node *) m->private;
2050 struct drm_device *dev = node->minor->dev;
2051 struct radeon_device *rdev = dev->dev_private;
2052 uint32_t csq_stat, csq2_stat, tmp;
2053 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
2054 unsigned i;
2055
2056 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2057 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
2058 csq_stat = RREG32(RADEON_CP_CSQ_STAT);
2059 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
2060 r_rptr = (csq_stat >> 0) & 0x3ff;
2061 r_wptr = (csq_stat >> 10) & 0x3ff;
2062 ib1_rptr = (csq_stat >> 20) & 0x3ff;
2063 ib1_wptr = (csq2_stat >> 0) & 0x3ff;
2064 ib2_rptr = (csq2_stat >> 10) & 0x3ff;
2065 ib2_wptr = (csq2_stat >> 20) & 0x3ff;
2066 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
2067 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
2068 seq_printf(m, "Ring rptr %u\n", r_rptr);
2069 seq_printf(m, "Ring wptr %u\n", r_wptr);
2070 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
2071 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
2072 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
2073 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
2074	/* FIXME: 0, 128 and 640 depend on the fifo setup, see cp_init_kms;
2075	 * 128 = indirect1_start * 8 and 640 = indirect2_start * 8 */
2076 seq_printf(m, "Ring fifo:\n");
2077 for (i = 0; i < 256; i++) {
2078 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2079 tmp = RREG32(RADEON_CP_CSQ_DATA);
2080 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
2081 }
2082 seq_printf(m, "Indirect1 fifo:\n");
2083 for (i = 256; i <= 512; i++) {
2084 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2085 tmp = RREG32(RADEON_CP_CSQ_DATA);
2086 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
2087 }
2088 seq_printf(m, "Indirect2 fifo:\n");
2089	for (i = 640; i < ib2_wptr; i++) {
2090 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2091 tmp = RREG32(RADEON_CP_CSQ_DATA);
2092 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
2093 }
2094 return 0;
2095}
2096
2097static int r100_debugfs_mc_info(struct seq_file *m, void *data)
2098{
2099 struct drm_info_node *node = (struct drm_info_node *) m->private;
2100 struct drm_device *dev = node->minor->dev;
2101 struct radeon_device *rdev = dev->dev_private;
2102 uint32_t tmp;
2103
2104 tmp = RREG32(RADEON_CONFIG_MEMSIZE);
2105 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
2106 tmp = RREG32(RADEON_MC_FB_LOCATION);
2107 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
2108 tmp = RREG32(RADEON_BUS_CNTL);
2109 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
2110 tmp = RREG32(RADEON_MC_AGP_LOCATION);
2111 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
2112 tmp = RREG32(RADEON_AGP_BASE);
2113 seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
2114 tmp = RREG32(RADEON_HOST_PATH_CNTL);
2115 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
2116 tmp = RREG32(0x01D0);
2117 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
2118 tmp = RREG32(RADEON_AIC_LO_ADDR);
2119 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
2120 tmp = RREG32(RADEON_AIC_HI_ADDR);
2121 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
2122 tmp = RREG32(0x01E4);
2123 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
2124 return 0;
2125}
2126
2127static struct drm_info_list r100_debugfs_rbbm_list[] = {
2128 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
2129};
2130
2131static struct drm_info_list r100_debugfs_cp_list[] = {
2132 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
2133 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
2134};
2135
2136static struct drm_info_list r100_debugfs_mc_info_list[] = {
2137 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
2138};
2139#endif
2140
2141int r100_debugfs_rbbm_init(struct radeon_device *rdev)
2142{
2143#if defined(CONFIG_DEBUG_FS)
2144 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
2145#else
2146 return 0;
2147#endif
2148}
2149
2150int r100_debugfs_cp_init(struct radeon_device *rdev)
2151{
2152#if defined(CONFIG_DEBUG_FS)
2153 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
2154#else
2155 return 0;
2156#endif
2157}
2158
2159int r100_debugfs_mc_info_init(struct radeon_device *rdev)
2160{
2161#if defined(CONFIG_DEBUG_FS)
2162 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
2163#else
2164 return 0;
2165#endif
2166}
2167
2168int r100_set_surface_reg(struct radeon_device *rdev, int reg,
2169 uint32_t tiling_flags, uint32_t pitch,
2170 uint32_t offset, uint32_t obj_size)
2171{
2172 int surf_index = reg * 16;
2173 int flags = 0;
2174
2175	/* pitch is stored in units of 16 on r100/r200 and units of 8 on r300+ */
2176 if (rdev->family < CHIP_R300)
2177 flags = pitch / 16;
2178 else
2179 flags = pitch / 8;
2180
2181 if (rdev->family <= CHIP_RS200) {
2182 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2183 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2184 flags |= RADEON_SURF_TILE_COLOR_BOTH;
2185 if (tiling_flags & RADEON_TILING_MACRO)
2186 flags |= RADEON_SURF_TILE_COLOR_MACRO;
2187 } else if (rdev->family <= CHIP_RV280) {
2188 if (tiling_flags & (RADEON_TILING_MACRO))
2189 flags |= R200_SURF_TILE_COLOR_MACRO;
2190 if (tiling_flags & RADEON_TILING_MICRO)
2191 flags |= R200_SURF_TILE_COLOR_MICRO;
2192 } else {
2193 if (tiling_flags & RADEON_TILING_MACRO)
2194 flags |= R300_SURF_TILE_MACRO;
2195 if (tiling_flags & RADEON_TILING_MICRO)
2196 flags |= R300_SURF_TILE_MICRO;
2197 }
2198
2199 DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
2200 WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
2201 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
2202 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
2203 return 0;
2204}
2205
2206void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
2207{
2208 int surf_index = reg * 16;
2209 WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
2210}
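/*
 * Usage sketch (illustrative values only): make surface 0 describe a
 * macro-tiled 1 MB colour buffer at offset 0 with a 2048-byte pitch,
 * then release the slot again:
 *
 *	r100_set_surface_reg(rdev, 0, RADEON_TILING_MACRO, 2048, 0, 1024 * 1024);
 *	...
 *	r100_clear_surface_reg(rdev, 0);
 */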
2211
2212void r100_bandwidth_update(struct radeon_device *rdev)
2213{
2214 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
2215 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
2216 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
2217 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
2218 fixed20_12 memtcas_ff[8] = {
2219 fixed_init(1),
2220 fixed_init(2),
2221 fixed_init(3),
2222 fixed_init(0),
2223 fixed_init_half(1),
2224 fixed_init_half(2),
2225 fixed_init(0),
2226 };
2227 fixed20_12 memtcas_rs480_ff[8] = {
2228 fixed_init(0),
2229 fixed_init(1),
2230 fixed_init(2),
2231 fixed_init(3),
2232 fixed_init(0),
2233 fixed_init_half(1),
2234 fixed_init_half(2),
2235 fixed_init_half(3),
2236 };
2237 fixed20_12 memtcas2_ff[8] = {
2238 fixed_init(0),
2239 fixed_init(1),
2240 fixed_init(2),
2241 fixed_init(3),
2242 fixed_init(4),
2243 fixed_init(5),
2244 fixed_init(6),
2245 fixed_init(7),
2246 };
2247 fixed20_12 memtrbs[8] = {
2248 fixed_init(1),
2249 fixed_init_half(1),
2250 fixed_init(2),
2251 fixed_init_half(2),
2252 fixed_init(3),
2253 fixed_init_half(3),
2254 fixed_init(4),
2255 fixed_init_half(4)
2256 };
2257 fixed20_12 memtrbs_r4xx[8] = {
2258 fixed_init(4),
2259 fixed_init(5),
2260 fixed_init(6),
2261 fixed_init(7),
2262 fixed_init(8),
2263 fixed_init(9),
2264 fixed_init(10),
2265 fixed_init(11)
2266 };
2267 fixed20_12 min_mem_eff;
2268 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
2269 fixed20_12 cur_latency_mclk, cur_latency_sclk;
2270 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
2271 disp_drain_rate2, read_return_rate;
2272 fixed20_12 time_disp1_drop_priority;
2273 int c;
2274 int cur_size = 16; /* in octawords */
2275 int critical_point = 0, critical_point2;
2276/* uint32_t read_return_rate, time_disp1_drop_priority; */
2277 int stop_req, max_stop_req;
2278 struct drm_display_mode *mode1 = NULL;
2279 struct drm_display_mode *mode2 = NULL;
2280 uint32_t pixel_bytes1 = 0;
2281 uint32_t pixel_bytes2 = 0;
2282
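	/* Overview of the code below: derive effective memory and system
	 * clocks, compare the active CRTCs' peak display bandwidth against
	 * available memory bandwidth (derated by a minimum-efficiency
	 * factor), turn the DRAM timing fields into an MC latency, and
	 * finally program the GRPH_BUFFER_CNTL critical points so the
	 * display FIFOs request data early enough to avoid underflow.
	 */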
2283 if (rdev->mode_info.crtcs[0]->base.enabled) {
2284 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
2285 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
2286 }
2287 if (rdev->mode_info.crtcs[1]->base.enabled) {
2288 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
2289 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
2290 }
2291
2292 min_mem_eff.full = rfixed_const_8(0);
2293	/* bump display read latency priority in the MC when disp_priority is forced */
2294 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
2295 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
2296 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
2297 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
2298 /* check crtc enables */
2299 if (mode2)
2300 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
2301 if (mode1)
2302 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
2303 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
2304 }
2305
2306	/*
2307	 * determine if there is enough bandwidth for the current mode
2308	 */
2309 mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
2310 temp_ff.full = rfixed_const(100);
2311 mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
2312 sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
2313 sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
2314
2315 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
2316 temp_ff.full = rfixed_const(temp);
2317 mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
2318
2319 pix_clk.full = 0;
2320 pix_clk2.full = 0;
2321 peak_disp_bw.full = 0;
2322 if (mode1) {
2323 temp_ff.full = rfixed_const(1000);
2324 pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
2325 pix_clk.full = rfixed_div(pix_clk, temp_ff);
2326 temp_ff.full = rfixed_const(pixel_bytes1);
2327 peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
2328 }
2329 if (mode2) {
2330 temp_ff.full = rfixed_const(1000);
2331 pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
2332 pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
2333 temp_ff.full = rfixed_const(pixel_bytes2);
2334 peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
2335 }
2336
2337 mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
2338 if (peak_disp_bw.full >= mem_bw.full) {
2339		DRM_ERROR("You may not have enough display bandwidth for the current mode\n"
2340			  "If you have flickering problems, try lowering the resolution, refresh rate, or color depth\n");
2341 }
2342
2343	/* Get DRAM timing values from the MEM_TIMING_CNTL register... converting its contents. */
2344 temp = RREG32(RADEON_MEM_TIMING_CNTL);
2345 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
2346 mem_trcd = ((temp >> 2) & 0x3) + 1;
2347 mem_trp = ((temp & 0x3)) + 1;
2348 mem_tras = ((temp & 0x70) >> 4) + 1;
2349 } else if (rdev->family == CHIP_R300 ||
2350 rdev->family == CHIP_R350) { /* r300, r350 */
2351 mem_trcd = (temp & 0x7) + 1;
2352 mem_trp = ((temp >> 8) & 0x7) + 1;
2353 mem_tras = ((temp >> 11) & 0xf) + 4;
2354 } else if (rdev->family == CHIP_RV350 ||
2355 rdev->family <= CHIP_RV380) {
2356 /* rv3x0 */
2357 mem_trcd = (temp & 0x7) + 3;
2358 mem_trp = ((temp >> 8) & 0x7) + 3;
2359 mem_tras = ((temp >> 11) & 0xf) + 6;
2360 } else if (rdev->family == CHIP_R420 ||
2361 rdev->family == CHIP_R423 ||
2362 rdev->family == CHIP_RV410) {
2363 /* r4xx */
2364 mem_trcd = (temp & 0xf) + 3;
2365 if (mem_trcd > 15)
2366 mem_trcd = 15;
2367 mem_trp = ((temp >> 8) & 0xf) + 3;
2368 if (mem_trp > 15)
2369 mem_trp = 15;
2370 mem_tras = ((temp >> 12) & 0x1f) + 6;
2371 if (mem_tras > 31)
2372 mem_tras = 31;
2373 } else { /* RV200, R200 */
2374 mem_trcd = (temp & 0x7) + 1;
2375 mem_trp = ((temp >> 8) & 0x7) + 1;
2376 mem_tras = ((temp >> 12) & 0xf) + 4;
2377 }
2378 /* convert to FF */
2379 trcd_ff.full = rfixed_const(mem_trcd);
2380 trp_ff.full = rfixed_const(mem_trp);
2381 tras_ff.full = rfixed_const(mem_tras);
2382
2383	/* Get values from the MEM_SDRAM_MODE_REG register... converting its contents. */
2384 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
2385 data = (temp & (7 << 20)) >> 20;
2386 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
2387 if (rdev->family == CHIP_RS480) /* don't think rs400 */
2388 tcas_ff = memtcas_rs480_ff[data];
2389 else
2390 tcas_ff = memtcas_ff[data];
2391 } else
2392 tcas_ff = memtcas2_ff[data];
2393
2394 if (rdev->family == CHIP_RS400 ||
2395 rdev->family == CHIP_RS480) {
2396		/* extra CAS latency stored in bits 23-25, 0-4 clocks */
2397 data = (temp >> 23) & 0x7;
2398 if (data < 5)
2399 tcas_ff.full += rfixed_const(data);
2400 }
2401
2402 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
2403 /* on the R300, Tcas is included in Trbs.
2404 */
2405 temp = RREG32(RADEON_MEM_CNTL);
2406 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
2407 if (data == 1) {
2408 if (R300_MEM_USE_CD_CH_ONLY & temp) {
2409 temp = RREG32(R300_MC_IND_INDEX);
2410 temp &= ~R300_MC_IND_ADDR_MASK;
2411 temp |= R300_MC_READ_CNTL_CD_mcind;
2412 WREG32(R300_MC_IND_INDEX, temp);
2413 temp = RREG32(R300_MC_IND_DATA);
2414 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
2415 } else {
2416 temp = RREG32(R300_MC_READ_CNTL_AB);
2417 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2418 }
2419 } else {
2420 temp = RREG32(R300_MC_READ_CNTL_AB);
2421 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2422 }
2423 if (rdev->family == CHIP_RV410 ||
2424 rdev->family == CHIP_R420 ||
2425 rdev->family == CHIP_R423)
2426 trbs_ff = memtrbs_r4xx[data];
2427 else
2428 trbs_ff = memtrbs[data];
2429 tcas_ff.full += trbs_ff.full;
2430 }
2431
2432 sclk_eff_ff.full = sclk_ff.full;
2433
2434 if (rdev->flags & RADEON_IS_AGP) {
2435 fixed20_12 agpmode_ff;
2436 agpmode_ff.full = rfixed_const(radeon_agpmode);
2437 temp_ff.full = rfixed_const_666(16);
2438 sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
2439 }
2440 /* TODO PCIE lanes may affect this - agpmode == 16?? */
2441
2442 if (ASIC_IS_R300(rdev)) {
2443 sclk_delay_ff.full = rfixed_const(250);
2444 } else {
2445 if ((rdev->family == CHIP_RV100) ||
2446 rdev->flags & RADEON_IS_IGP) {
2447 if (rdev->mc.vram_is_ddr)
2448 sclk_delay_ff.full = rfixed_const(41);
2449 else
2450 sclk_delay_ff.full = rfixed_const(33);
2451 } else {
2452 if (rdev->mc.vram_width == 128)
2453 sclk_delay_ff.full = rfixed_const(57);
2454 else
2455 sclk_delay_ff.full = rfixed_const(41);
2456 }
2457 }
2458
2459 mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
2460
2461 if (rdev->mc.vram_is_ddr) {
2462 if (rdev->mc.vram_width == 32) {
2463 k1.full = rfixed_const(40);
2464 c = 3;
2465 } else {
2466 k1.full = rfixed_const(20);
2467 c = 1;
2468 }
2469 } else {
2470 k1.full = rfixed_const(40);
2471 c = 3;
2472 }
2473
2474 temp_ff.full = rfixed_const(2);
2475 mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
2476 temp_ff.full = rfixed_const(c);
2477 mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
2478 temp_ff.full = rfixed_const(4);
2479 mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
2480 mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
2481 mc_latency_mclk.full += k1.full;
2482
2483 mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
2484 mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
2485
2486 /*
2487 HW cursor time assuming worst case of full size colour cursor.
2488 */
2489 temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
2490 temp_ff.full += trcd_ff.full;
2491 if (temp_ff.full < tras_ff.full)
2492 temp_ff.full = tras_ff.full;
2493 cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
2494
2495 temp_ff.full = rfixed_const(cur_size);
2496 cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
2497 /*
2498 Find the total latency for the display data.
2499 */
2500 disp_latency_overhead.full = rfixed_const(80);
2501 disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
2502 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
2503 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
2504
2505 if (mc_latency_mclk.full > mc_latency_sclk.full)
2506 disp_latency.full = mc_latency_mclk.full;
2507 else
2508 disp_latency.full = mc_latency_sclk.full;
2509
2510 /* setup Max GRPH_STOP_REQ default value */
2511 if (ASIC_IS_RV100(rdev))
2512 max_stop_req = 0x5c;
2513 else
2514 max_stop_req = 0x7c;
2515
2516 if (mode1) {
2517 /* CRTC1
2518 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
2519 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
2520 */
2521 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
2522
2523 if (stop_req > max_stop_req)
2524 stop_req = max_stop_req;
2525
2526 /*
2527 Find the drain rate of the display buffer.
2528 */
2529 temp_ff.full = rfixed_const((16/pixel_bytes1));
2530 disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
2531
2532 /*
2533 Find the critical point of the display buffer.
2534 */
2535 crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
2536 crit_point_ff.full += rfixed_const_half(0);
2537
2538 critical_point = rfixed_trunc(crit_point_ff);
2539
2540 if (rdev->disp_priority == 2) {
2541 critical_point = 0;
2542 }
2543
2544 /*
2545 The critical point should never be above max_stop_req-4. Setting
2546 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
2547 */
2548 if (max_stop_req - critical_point < 4)
2549 critical_point = 0;
2550
2551 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
2552 /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
2553 critical_point = 0x10;
2554 }
2555
2556 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
2557 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
2558 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
2559 temp &= ~(RADEON_GRPH_START_REQ_MASK);
2560 if ((rdev->family == CHIP_R350) &&
2561 (stop_req > 0x15)) {
2562 stop_req -= 0x10;
2563 }
2564 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
2565 temp |= RADEON_GRPH_BUFFER_SIZE;
2566 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
2567 RADEON_GRPH_CRITICAL_AT_SOF |
2568 RADEON_GRPH_STOP_CNTL);
2569 /*
2570 Write the result into the register.
2571 */
2572 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
2573 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
2574
2575#if 0
2576 if ((rdev->family == CHIP_RS400) ||
2577 (rdev->family == CHIP_RS480)) {
2578 /* attempt to program RS400 disp regs correctly ??? */
2579 temp = RREG32(RS400_DISP1_REG_CNTL);
2580 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
2581 RS400_DISP1_STOP_REQ_LEVEL_MASK);
2582 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
2583 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
2584 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
2585 temp = RREG32(RS400_DMIF_MEM_CNTL1);
2586 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
2587 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
2588 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
2589 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
2590 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
2591 }
2592#endif
2593
2594		DRM_DEBUG("GRPH_BUFFER_CNTL set to %x\n",
2595			  (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
2597 }
2598
2599 if (mode2) {
2600 u32 grph2_cntl;
2601 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
2602
2603 if (stop_req > max_stop_req)
2604 stop_req = max_stop_req;
2605
2606 /*
2607 Find the drain rate of the display buffer.
2608 */
2609 temp_ff.full = rfixed_const((16/pixel_bytes2));
2610 disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
2611
2612 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
2613 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
2614 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
2615 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
2616 if ((rdev->family == CHIP_R350) &&
2617 (stop_req > 0x15)) {
2618 stop_req -= 0x10;
2619 }
2620 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
2621 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
2622 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
2623 RADEON_GRPH_CRITICAL_AT_SOF |
2624 RADEON_GRPH_STOP_CNTL);
2625
2626 if ((rdev->family == CHIP_RS100) ||
2627 (rdev->family == CHIP_RS200))
2628 critical_point2 = 0;
2629 else {
2630			temp = (rdev->mc.vram_width * (rdev->mc.vram_is_ddr + 1)) / 128;
2631 temp_ff.full = rfixed_const(temp);
2632 temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
2633 if (sclk_ff.full < temp_ff.full)
2634 temp_ff.full = sclk_ff.full;
2635
2636 read_return_rate.full = temp_ff.full;
2637
2638 if (mode1) {
2639 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
2640 time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
2641 } else {
2642 time_disp1_drop_priority.full = 0;
2643 }
2644 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
2645 crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
2646 crit_point_ff.full += rfixed_const_half(0);
2647
2648 critical_point2 = rfixed_trunc(crit_point_ff);
2649
2650 if (rdev->disp_priority == 2) {
2651 critical_point2 = 0;
2652 }
2653
2654 if (max_stop_req - critical_point2 < 4)
2655 critical_point2 = 0;
2656
2657 }
2658
2659 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
2660 /* some R300 cards have problem with this set to 0 */
2661 critical_point2 = 0x10;
2662 }
2663
2664 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
2665 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
2666
2667 if ((rdev->family == CHIP_RS400) ||
2668 (rdev->family == CHIP_RS480)) {
2669#if 0
2670 /* attempt to program RS400 disp2 regs correctly ??? */
2671 temp = RREG32(RS400_DISP2_REQ_CNTL1);
2672 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
2673 RS400_DISP2_STOP_REQ_LEVEL_MASK);
2674 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
2675 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
2676 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
2677 temp = RREG32(RS400_DISP2_REQ_CNTL2);
2678 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
2679 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
2680 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
2681 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
2682 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
2683#endif
2684 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
2685 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
2686 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
2687 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
2688 }
2689
2690		DRM_DEBUG("GRPH2_BUFFER_CNTL set to %x\n",
2691			  (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
2692 }
2693}
2694
2695static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
2696{
2697 DRM_ERROR("pitch %d\n", t->pitch);
2698 DRM_ERROR("width %d\n", t->width);
2699 DRM_ERROR("height %d\n", t->height);
2700 DRM_ERROR("num levels %d\n", t->num_levels);
2701 DRM_ERROR("depth %d\n", t->txdepth);
2702 DRM_ERROR("bpp %d\n", t->cpp);
2703 DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
2704 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
2705 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
2706}
2707
2708static int r100_cs_track_cube(struct radeon_device *rdev,
2709 struct r100_cs_track *track, unsigned idx)
2710{
2711 unsigned face, w, h;
2712 struct radeon_object *cube_robj;
2713 unsigned long size;
2714
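	/* Only five cube_info slots are checked; the remaining face
	 * appears to be covered by the main texture state (assumption
	 * based on how the tracker stores cube maps).
	 */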
2715 for (face = 0; face < 5; face++) {
2716 cube_robj = track->textures[idx].cube_info[face].robj;
2717 w = track->textures[idx].cube_info[face].width;
2718 h = track->textures[idx].cube_info[face].height;
2719
2720 size = w * h;
2721 size *= track->textures[idx].cpp;
2722
2723 size += track->textures[idx].cube_info[face].offset;
2724
2725 if (size > radeon_object_size(cube_robj)) {
2726 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
2727 size, radeon_object_size(cube_robj));
2728 r100_cs_track_texture_print(&track->textures[idx]);
2729 return -1;
2730 }
2731 }
2732 return 0;
2733}
2734
2735static int r100_cs_track_texture_check(struct radeon_device *rdev,
2736 struct r100_cs_track *track)
2737{
2738 struct radeon_object *robj;
2739 unsigned long size;
2740 unsigned u, i, w, h;
2741 int ret;
2742
2743 for (u = 0; u < track->num_texture; u++) {
2744 if (!track->textures[u].enabled)
2745 continue;
2746 robj = track->textures[u].robj;
2747 if (robj == NULL) {
2748 DRM_ERROR("No texture bound to unit %u\n", u);
2749 return -EINVAL;
2750 }
2751 size = 0;
2752 for (i = 0; i <= track->textures[u].num_levels; i++) {
2753 if (track->textures[u].use_pitch) {
2754 if (rdev->family < CHIP_R300)
2755 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
2756 else
2757 w = track->textures[u].pitch / (1 << i);
2758 } else {
2759 w = track->textures[u].width / (1 << i);
2760 if (rdev->family >= CHIP_RV515)
2761 w |= track->textures[u].width_11;
2762 if (track->textures[u].roundup_w)
2763 w = roundup_pow_of_two(w);
2764 }
2765 h = track->textures[u].height / (1 << i);
2766 if (rdev->family >= CHIP_RV515)
2767 h |= track->textures[u].height_11;
2768 if (track->textures[u].roundup_h)
2769 h = roundup_pow_of_two(h);
2770 size += w * h;
2771 }
2772 size *= track->textures[u].cpp;
2773 switch (track->textures[u].tex_coord_type) {
2774 case 0:
2775 break;
2776 case 1:
2777 size *= (1 << track->textures[u].txdepth);
2778 break;
2779 case 2:
2780 if (track->separate_cube) {
2781 ret = r100_cs_track_cube(rdev, track, u);
2782 if (ret)
2783 return ret;
2784 } else
2785 size *= 6;
2786 break;
2787 default:
2788 DRM_ERROR("Invalid texture coordinate type %u for unit "
2789 "%u\n", track->textures[u].tex_coord_type, u);
2790 return -EINVAL;
2791 }
2792 if (size > radeon_object_size(robj)) {
2793 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
2794 "%lu\n", u, size, radeon_object_size(robj));
2795 r100_cs_track_texture_print(&track->textures[u]);
2796 return -EINVAL;
2797 }
2798 }
2799 return 0;
2800}
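/*
 * Worked example for the size check above (illustrative numbers): a
 * 256x256 texture with num_levels = 1 and cpp = 4 sums the 256x256 and
 * 128x128 mip levels, i.e. (65536 + 16384) * 4 = 327680 bytes, so the
 * backing object must be at least that large.
 */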
2801
2802int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2803{
2804 unsigned i;
2805 unsigned long size;
2806 unsigned prim_walk;
2807 unsigned nverts;
2808
2809 for (i = 0; i < track->num_cb; i++) {
2810 if (track->cb[i].robj == NULL) {
2811 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
2812 return -EINVAL;
2813 }
2814 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
2815 size += track->cb[i].offset;
2816 if (size > radeon_object_size(track->cb[i].robj)) {
2817 DRM_ERROR("[drm] Buffer too small for color buffer %d "
2818 "(need %lu have %lu) !\n", i, size,
2819 radeon_object_size(track->cb[i].robj));
2820 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
2821 i, track->cb[i].pitch, track->cb[i].cpp,
2822 track->cb[i].offset, track->maxy);
2823 return -EINVAL;
2824 }
2825 }
2826 if (track->z_enabled) {
2827 if (track->zb.robj == NULL) {
2828 DRM_ERROR("[drm] No buffer for z buffer !\n");
2829 return -EINVAL;
2830 }
2831 size = track->zb.pitch * track->zb.cpp * track->maxy;
2832 size += track->zb.offset;
2833 if (size > radeon_object_size(track->zb.robj)) {
2834 DRM_ERROR("[drm] Buffer too small for z buffer "
2835 "(need %lu have %lu) !\n", size,
2836 radeon_object_size(track->zb.robj));
2837 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
2838 track->zb.pitch, track->zb.cpp,
2839 track->zb.offset, track->maxy);
2840 return -EINVAL;
2841 }
2842 }
2843 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
2844 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
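	/* prim_walk encodings assumed below (inferred from the checks,
	 * not from documentation): 1 = indexed arrays bounded by
	 * max_indx, 2 = sequential arrays bounded by the vertex count,
	 * 3 = immediate-mode data embedded in the IB.
	 */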
2845 switch (prim_walk) {
2846 case 1:
2847 for (i = 0; i < track->num_arrays; i++) {
2848 size = track->arrays[i].esize * track->max_indx * 4;
2849 if (track->arrays[i].robj == NULL) {
2850 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2851 "bound\n", prim_walk, i);
2852 return -EINVAL;
2853 }
2854 if (size > radeon_object_size(track->arrays[i].robj)) {
2855 DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
2856 "have %lu dwords\n", prim_walk, i,
2857 size >> 2,
2858 radeon_object_size(track->arrays[i].robj) >> 2);
2859 DRM_ERROR("Max indices %u\n", track->max_indx);
2860 return -EINVAL;
2861 }
2862 }
2863 break;
2864 case 2:
2865 for (i = 0; i < track->num_arrays; i++) {
2866 size = track->arrays[i].esize * (nverts - 1) * 4;
2867 if (track->arrays[i].robj == NULL) {
2868 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2869 "bound\n", prim_walk, i);
2870 return -EINVAL;
2871 }
2872 if (size > radeon_object_size(track->arrays[i].robj)) {
2873 DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
2874 "have %lu dwords\n", prim_walk, i, size >> 2,
2875 radeon_object_size(track->arrays[i].robj) >> 2);
2876 return -EINVAL;
2877 }
2878 }
2879 break;
2880 case 3:
2881 size = track->vtx_size * nverts;
2882 if (size != track->immd_dwords) {
2883			DRM_ERROR("IMMD draw has %u dwords but needs %lu dwords\n",
2884 track->immd_dwords, size);
2885 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
2886 nverts, track->vtx_size);
2887 return -EINVAL;
2888 }
2889 break;
2890 default:
2891 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
2892 prim_walk);
2893 return -EINVAL;
2894 }
2895 return r100_cs_track_texture_check(rdev, track);
2896}
2897
2898void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
2899{
2900 unsigned i, face;
2901
2902 if (rdev->family < CHIP_R300) {
2903 track->num_cb = 1;
2904 if (rdev->family <= CHIP_RS200)
2905 track->num_texture = 3;
2906 else
2907 track->num_texture = 6;
2908 track->maxy = 2048;
2909 track->separate_cube = 1;
2910 } else {
2911 track->num_cb = 4;
2912 track->num_texture = 16;
2913 track->maxy = 4096;
2914 track->separate_cube = 0;
2915 }
2916
2917 for (i = 0; i < track->num_cb; i++) {
2918 track->cb[i].robj = NULL;
2919 track->cb[i].pitch = 8192;
2920 track->cb[i].cpp = 16;
2921 track->cb[i].offset = 0;
2922 }
2923 track->z_enabled = true;
2924 track->zb.robj = NULL;
2925 track->zb.pitch = 8192;
2926 track->zb.cpp = 4;
2927 track->zb.offset = 0;
2928 track->vtx_size = 0x7F;
2929 track->immd_dwords = 0xFFFFFFFFUL;
2930 track->num_arrays = 11;
2931 track->max_indx = 0x00FFFFFFUL;
2932 for (i = 0; i < track->num_arrays; i++) {
2933 track->arrays[i].robj = NULL;
2934 track->arrays[i].esize = 0x7F;
2935 }
2936 for (i = 0; i < track->num_texture; i++) {
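		/* The defaults below are deliberately permissive upper
		 * bounds; 16536 looks like a typo for 16384, but since it
		 * is only a bound it is harmless either way (assumption).
		 */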
2937 track->textures[i].pitch = 16536;
2938 track->textures[i].width = 16536;
2939 track->textures[i].height = 16536;
2940 track->textures[i].width_11 = 1 << 11;
2941 track->textures[i].height_11 = 1 << 11;
2942 track->textures[i].num_levels = 12;
2943 if (rdev->family <= CHIP_RS200) {
2944 track->textures[i].tex_coord_type = 0;
2945 track->textures[i].txdepth = 0;
2946 } else {
2947 track->textures[i].txdepth = 16;
2948 track->textures[i].tex_coord_type = 1;
2949 }
2950 track->textures[i].cpp = 64;
2951 track->textures[i].robj = NULL;
2952 /* CS IB emission code makes sure texture unit are disabled */
2953 track->textures[i].enabled = false;
2954 track->textures[i].roundup_w = true;
2955 track->textures[i].roundup_h = true;
2956 if (track->separate_cube)
2957 for (face = 0; face < 5; face++) {
2958 track->textures[i].cube_info[face].robj = NULL;
2959 track->textures[i].cube_info[face].width = 16536;
2960 track->textures[i].cube_info[face].height = 16536;
2961 track->textures[i].cube_info[face].offset = 0;
2962 }
2963 }
2964}
2965
2966int r100_ring_test(struct radeon_device *rdev)
2967{
2968 uint32_t scratch;
2969 uint32_t tmp = 0;
2970 unsigned i;
2971 int r;
2972
2973 r = radeon_scratch_get(rdev, &scratch);
2974 if (r) {
2975 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2976 return r;
2977 }
2978 WREG32(scratch, 0xCAFEDEAD);
2979 r = radeon_ring_lock(rdev, 2);
2980 if (r) {
2981 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2982 radeon_scratch_free(rdev, scratch);
2983 return r;
2984 }
2985 radeon_ring_write(rdev, PACKET0(scratch, 0));
2986 radeon_ring_write(rdev, 0xDEADBEEF);
2987 radeon_ring_unlock_commit(rdev);
2988 for (i = 0; i < rdev->usec_timeout; i++) {
2989 tmp = RREG32(scratch);
2990 if (tmp == 0xDEADBEEF) {
2991 break;
2992 }
2993 DRM_UDELAY(1);
2994 }
2995 if (i < rdev->usec_timeout) {
2996 DRM_INFO("ring test succeeded in %d usecs\n", i);
2997 } else {
2998		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
2999 scratch, tmp);
3000 r = -EINVAL;
3001 }
3002 radeon_scratch_free(rdev, scratch);
3003 return r;
3004}
3005
3006void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3007{
3008 radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
3009 radeon_ring_write(rdev, ib->gpu_addr);
3010 radeon_ring_write(rdev, ib->length_dw);
3011}
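/*
 * The PACKET0 above covers two consecutive registers starting at
 * RADEON_CP_IB_BASE (a count field of 1 means two dwords follow), which
 * presumably writes the IB base address and its size; the CP then
 * fetches and executes the indirect buffer.
 */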
3012
3013int r100_ib_test(struct radeon_device *rdev)
3014{
3015 struct radeon_ib *ib;
3016 uint32_t scratch;
3017 uint32_t tmp = 0;
3018 unsigned i;
3019 int r;
3020
3021 r = radeon_scratch_get(rdev, &scratch);
3022 if (r) {
3023 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3024 return r;
3025 }
3026 WREG32(scratch, 0xCAFEDEAD);
3027 r = radeon_ib_get(rdev, &ib);
3028	if (r) {
		radeon_scratch_free(rdev, scratch);
3029		return r;
3030	}
3031 ib->ptr[0] = PACKET0(scratch, 0);
3032 ib->ptr[1] = 0xDEADBEEF;
3033 ib->ptr[2] = PACKET2(0);
3034 ib->ptr[3] = PACKET2(0);
3035 ib->ptr[4] = PACKET2(0);
3036 ib->ptr[5] = PACKET2(0);
3037 ib->ptr[6] = PACKET2(0);
3038 ib->ptr[7] = PACKET2(0);
3039 ib->length_dw = 8;
3040 r = radeon_ib_schedule(rdev, ib);
3041 if (r) {
3042 radeon_scratch_free(rdev, scratch);
3043 radeon_ib_free(rdev, &ib);
3044 return r;
3045 }
3046 r = radeon_fence_wait(ib->fence, false);
3047	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
3048		return r;
3049	}
3050 for (i = 0; i < rdev->usec_timeout; i++) {
3051 tmp = RREG32(scratch);
3052 if (tmp == 0xDEADBEEF) {
3053 break;
3054 }
3055 DRM_UDELAY(1);
3056 }
3057 if (i < rdev->usec_timeout) {
3058 DRM_INFO("ib test succeeded in %u usecs\n", i);
3059 } else {
3060		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3061 scratch, tmp);
3062 r = -EINVAL;
3063 }
3064 radeon_scratch_free(rdev, scratch);
3065 radeon_ib_free(rdev, &ib);
3066 return r;
3067}