/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);

/*
 * R600 PCIE GART
 */
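/*
 * The GART page table lives in VRAM as a flat array of 64-bit PTEs, one
 * per GPU page (hence table_size = num_gpu_pages * 8 below); clearing
 * entry i zeroes the PTE at byte offset i * 8, which unmaps that page
 * from the GPU's address space.
 */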
int r600_gart_clear_page(struct radeon_device *rdev, int i)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
	u64 pte;

	/* valid page indices are 0 .. num_gpu_pages - 1 */
	if (i < 0 || i >= rdev->gart.num_gpu_pages)
		return -EINVAL;
	pte = 0;
	writeq(pte, ptr + (i * 8));
	return 0;
}

void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read the invalidation response */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

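/*
 * Bring the PCIE GART up: pin the page table in VRAM, program the L1/L2
 * TLB controls, point VM context 0 at the GTT aperture (with out-of-range
 * accesses redirected to the dummy page), then flush the TLB.
 */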
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		radeon_object_kunmap(rdev->gart.table.vram.robj);
		radeon_object_unpin(rdev->gart.table.vram.robj);
	}
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

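/*
 * Poll the MC busy bits of SRBM_STATUS (mask 0x3F00) for up to
 * rdev->usec_timeout microseconds; returns 0 once the memory controller
 * is idle, -1 on timeout.
 */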
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC busy bits from SRBM_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

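/*
 * Pick the GPU address space layout. The goal on AGP cards is a single
 * contiguous SYSTEM_APERTURE covering both VRAM and the AGP/GTT range,
 * e.g. (one possible outcome, VRAM placed directly below the AGP
 * aperture):
 *
 *	vram_location --> +------------+
 *	                  |    VRAM    |
 *	gtt_location  --> +------------+
 *	                  |  AGP/GTT   |
 *	                  +------------+
 */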
int r600_mc_init(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 tmp;
	int chansize, numchan;
	int r;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);

	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
		rdev->mc.mc_vram_size = rdev->mc.aper_size;

	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
		rdev->mc.real_vram_size = rdev->mc.aper_size;

	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			return r;
		/* gtt_size is setup by radeon_agp_init */
		rdev->mc.gtt_location = rdev->mc.agp_base;
		tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
		/* Try to put vram before or after AGP because we
		 * want SYSTEM_APERTURE to cover both VRAM and
		 * AGP so that the GPU can catch out-of-VRAM/AGP accesses
		 */
		if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
			/* Enough room before */
			rdev->mc.vram_location = rdev->mc.gtt_location -
							rdev->mc.mc_vram_size;
		} else if (tmp > rdev->mc.mc_vram_size) {
			/* Enough room after */
			rdev->mc.vram_location = rdev->mc.gtt_location +
							rdev->mc.gtt_size;
		} else {
			/* Fall back to VRAM at 0 followed by the GTT;
			 * this setup might not work on some cards
			 */
			rdev->mc.vram_location = 0x00000000UL;
			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
		}
	} else {
		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
			rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
							0xFFFF) << 24;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
			tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
			if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
				/* Enough room after vram */
				rdev->mc.gtt_location = tmp;
			} else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
				/* Enough room before vram */
				rdev->mc.gtt_location = 0;
			} else {
				/* Not enough room after or before;
				 * shrink the gart size
				 */
				if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
					rdev->mc.gtt_location = 0;
					rdev->mc.gtt_size = rdev->mc.vram_location;
				} else {
					rdev->mc.gtt_location = tmp;
					rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
				}
			}
		} else {
			rdev->mc.vram_location = 0x00000000UL;
			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		}
	}
	rdev->mc.vram_start = rdev->mc.vram_location;
	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	rdev->mc.gtt_start = rdev->mc.gtt_location;
	rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	/* FIXME: we should enforce default clock in case GPU is not in
	 * default setup
	 */
	a.full = rfixed_const(100);
	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
	return 0;
}

/* We don't check whether the GPU really needs a reset; we simply do the
 * reset. It's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 srbm_reset = 0;
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
	/* Check if any of the rendering blocks is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
		udelay(50);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	/* Reset other GPU blocks if necessary */
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
	if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_IH(1);
	if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
	if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
	if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
	dev_info(rdev->dev, "  R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	/* After reset we need to reinit the asic as the GPU often ends up in
	 * an incoherent state.
	 */
	atom_asic_init(rdev->mode_info.atom_context);
	rv515_mc_resume(rdev, &save);
	return 0;
}

int r600_gpu_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}

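/*
 * Build the 2-bits-per-pipe backend map consumed by GB_TILING_CONFIG:
 * pipe p gets backend id (b & 3) at bit position swizzle_pipe[p] * 2.
 * For example, 4 pipes with 4 enabled backends yield
 * (0 << 0) | (1 << 2) | (2 << 4) | (3 << 6) = 0xE4.
 */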
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}

/* Count the number of set bits (e.g. enabled pipes) in val. */
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}

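/*
 * Program the per-family GPU configuration: HDP init, tiling and
 * pipe/backend setup, and the SQ resource defaults that the 2D/3D
 * drivers are expected to adjust later.
 */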
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);
	tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
						rdev->config.r600.max_backends,
						(0xff << rdev->config.r600.max_backends) & 0xff);
	tiling_config |= BACKEND_MAP(tmp);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
	WREG32(CC_RB_BACKEND_DISABLE, tmp);

	/* Setup pipes */
	tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);

	tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if (rdev->family > CHIP_R600)
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RS780:
	case CHIP_RV620:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256) {
		tmp = 256;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RS780:
	case CHIP_RV620:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}

/*
 * Indirect registers accessor
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}

/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}

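/*
 * The CP microcode ships in two files per chip: radeon/<chip>_pfp.bin
 * (prefetch parser) and radeon/<chip>_me.bin (micro engine). The size
 * checks below are in bytes; PFP entries are one dword each, and the
 * *12 for the r6xx ME image suggests three dwords per PM4 microcode
 * entry (matching the PM4_UCODE_SIZE * 3 dword loop in
 * r600_cp_load_microcode()).
 */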
int r600_cp_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	size_t pfp_req_size, me_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600: chip_name = "R600"; break;
	case CHIP_RV610: chip_name = "RV610"; break;
	case CHIP_RV630: chip_name = "RV630"; break;
	case CHIP_RV620: chip_name = "RV620"; break;
	case CHIP_RV635: chip_name = "RV635"; break;
	case CHIP_RV670: chip_name = "RV670"; break;
	case CHIP_RS780:
	case CHIP_RS880: chip_name = "RS780"; break;
	case CHIP_RV770: chip_name = "RV770"; break;
	case CHIP_RV730:
	case CHIP_RV740: chip_name = "RV730"; break;
	case CHIP_RV710: chip_name = "RV710"; break;
	default: BUG();
	}

	if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
	}

	DRM_INFO("Loading %s CP Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}
out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
	}
	return err;
}

static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family < CHIP_RV770) {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}

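/*
 * (Re)start the CP: reset it, program the ring buffer size and the
 * read/write pointers (RB_RPTR_WR_ENA appears to let us force the read
 * pointer to 0 while the ring is being reprogrammed), then run
 * r600_cp_start() and a ring test before declaring the CP ready.
 */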
int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
#ifdef __BIG_ENDIAN
	WREG32(CP_RB_CNTL, BUF_SWAP_32BIT | RB_NO_UPDATE |
		(drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz);
#else
	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz);
#endif
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	tmp = RREG32(CP_RB_CNTL);
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}

void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}

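/*
 * The ring size is rounded to a supported power of two; e.g. a
 * requested 1 MB ring gives rb_bufsz = drm_order(1048576 / 8) = 17,
 * so ring_size = (1 << 18) * 4 = 1 MB exactly.
 */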
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}

/*
 * GPU scratch registers helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
	}
}

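/*
 * Smoke-test the ring: seed a scratch register with 0xCAFEDEAD, emit a
 * SET_CONFIG_REG packet that writes 0xDEADBEEF to it, then poll until
 * the GPU's write lands (or rdev->usec_timeout expires).
 */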
int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r600_wb_disable(struct radeon_device *rdev)
{
	WREG32(SCRATCH_UMSK, 0);
	if (rdev->wb.wb_obj) {
		radeon_object_kunmap(rdev->wb.wb_obj);
		radeon_object_unpin(rdev->wb.wb_obj);
	}
}

void r600_wb_fini(struct radeon_device *rdev)
{
	r600_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_object_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

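/*
 * The writeback buffer is a single GTT page: scratch register writeback
 * goes at its start (SCRATCH_ADDR) and the CP read pointer copy lives
 * at byte offset 1024 (CP_RB_RPTR_ADDR*), letting the driver read both
 * without an MMIO round trip.
 */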
int r600_wb_enable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
				RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				&rdev->wb.gpu_addr);
		if (r) {
			dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
			r600_wb_fini(rdev);
			return r;
		}
		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		if (r) {
			dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
			r600_wb_fini(rdev);
			return r;
		}
	}
	WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
	WREG32(SCRATCH_UMSK, 0xff);
	return 0;
}

void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, fence->seq);
}

int r600_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset,
		  uint64_t dst_offset,
		  unsigned num_pages,
		  struct radeon_fence *fence)
{
	/* FIXME: implement */
	return 0;
}

int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_pages, struct radeon_fence *fence)
{
	r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
	r600_blit_done_copy(rdev, fence);
	return 0;
}

int r600_irq_process(struct radeon_device *rdev)
{
	/* FIXME: implement */
	return 0;
}

int r600_irq_set(struct radeon_device *rdev)
{
	/* FIXME: implement */
	return 0;
}

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}

bool r600_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	reg = RREG32(D1CRTC_CONTROL) |
		RREG32(D2CRTC_CONTROL);
	if (reg & CRTC_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}

int r600_startup(struct radeon_device *rdev)
{
	int r;

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);

	r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
			      &rdev->r600_blit.shader_gpu_addr);
	if (r) {
		DRM_ERROR("failed to pin blit object %d\n", r);
		return r;
	}

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* write back buffers are not vital so don't worry about failure */
	r600_wb_enable(rdev);
	return 0;
}

int r600_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back
	 * into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);
	if (r) {
		return r;
	}

	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}
	return r;
}

int r600_suspend(struct radeon_device *rdev)
{
	/* FIXME: we should wait for ring to be empty */
	r600_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	/* unpin shaders bo */
	radeon_object_unpin(rdev->r600_blit.shader_obj);
	return 0;
}

/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init pretty much
 * does nothing more than call asic specific functions. This
 * should also allow us to remove a bunch of callback functions
 * like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc!\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!r600_card_posted(rdev) && rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_object_init(rdev);
	if (r)
		return r;
	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	if (!rdev->me_fw || !rdev->pfp_fw) {
		r = r600_cp_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_blit_init(rdev);
	if (r) {
		DRM_ERROR("radeon: failed blitter (%d).\n", r);
		return r;
	}

	r = r600_startup(rdev);
	if (r) {
		r600_suspend(rdev);
		r600_wb_fini(rdev);
		radeon_ring_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			rdev->accel_working = false;
		}
		r = r600_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			rdev->accel_working = false;
		}
	}
	return 0;
}

void r600_fini(struct radeon_device *rdev)
{
	/* Suspend operations */
	r600_suspend(rdev);

	r600_blit_fini(rdev);
	radeon_ring_fini(rdev);
	r600_wb_fini(rdev);
	r600_pcie_gart_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	if (rdev->flags & RADEON_IS_AGP)
		radeon_agp_fini(rdev);
	radeon_object_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
	radeon_dummy_page_fini(rdev);
}

/*
 * CS stuff
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}

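/*
 * Same idea as r600_ring_test(), but through the indirect buffer path:
 * the scratch write is placed in a small IB padded out with type-2 NOP
 * packets (PACKET2(0)), scheduled, and fenced before polling.
 */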
int r600_ib_test(struct radeon_device *rdev)
{
	struct radeon_ib *ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, &ib);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}
	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib->ptr[2] = 0xDEADBEEF;
	ib->ptr[3] = PACKET2(0);
	ib->ptr[4] = PACKET2(0);
	ib->ptr[5] = PACKET2(0);
	ib->ptr[6] = PACKET2(0);
	ib->ptr[7] = PACKET2(0);
	ib->ptr[8] = PACKET2(0);
	ib->ptr[9] = PACKET2(0);
	ib->ptr[10] = PACKET2(0);
	ib->ptr[11] = PACKET2(0);
	ib->ptr[12] = PACKET2(0);
	ib->ptr[13] = PACKET2(0);
	ib->ptr[14] = PACKET2(0);
	ib->ptr[15] = PACKET2(0);
	ib->length_dw = 16;
	r = radeon_ib_schedule(rdev, ib);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib->fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t rdp, wdp;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	rdp = RREG32(CP_RB_RPTR);
	wdp = RREG32(CP_RB_WPTR);
	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	for (j = 0; j <= count; j++) {
		i = (rdp + j) & rdev->cp.ptr_mask;
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
	}
	return 0;
}

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}