/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

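/* Program the HPD control registers for each connector present on the
 * board, mark the corresponding interrupt sources as enabled, and apply
 * them via r600_irq_set() if the IRQ handler is already installed. */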
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}

/*
 * R600 PCIE GART
 */
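/* Invalidate the GART TLB: flush the HDP cache so pending writes land in
 * VRAM, request an invalidation of the GTT range through VM context 0,
 * then poll the response field (a response of 2 signals a failed flush). */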
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

398
399int r600_pcie_gart_enable(struct radeon_device *rdev)
400{
401 u32 tmp;
402 int r, i;
403
404 if (rdev->gart.table.vram.robj == NULL) {
405 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
406 return -EINVAL;
771fe6b9 407 }
4aac0473
JG
408 r = radeon_gart_table_vram_pin(rdev);
409 if (r)
410 return r;
82568565 411 radeon_gart_restore(rdev);
bc1a631e 412
3ce0a23d
JG
413 /* Setup L2 cache */
414 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
415 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
416 EFFECTIVE_L2_QUEUE_SIZE(7));
417 WREG32(VM_L2_CNTL2, 0);
418 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
419 /* Setup TLB control */
420 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
421 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
422 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
423 ENABLE_WAIT_L2_QUERY;
424 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
425 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
426 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
427 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
428 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
429 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
430 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
431 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
432 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
433 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
434 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
435 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
436 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
437 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
438 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1a029b76 439 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
3ce0a23d
JG
440 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
441 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
442 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
443 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
444 (u32)(rdev->dummy_page.addr >> 12));
445 for (i = 1; i < 7; i++)
446 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
771fe6b9 447
3ce0a23d
JG
448 r600_pcie_gart_tlb_flush(rdev);
449 rdev->gart.ready = true;
771fe6b9
JG
450 return 0;
451}
452
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

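/* Poll the MC busy bits in SRBM_STATUS until the memory controller goes
 * idle; returns 0 on success, -1 if rdev->usec_timeout expires. */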
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

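/* Program the memory controller apertures (system, framebuffer and AGP)
 * with the MC stopped, then restore it and disable the VGA renderer so
 * it cannot scribble over driver-owned VRAM. */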
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			       rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			       rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			       rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			       rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same offset in the GPU address
 * space as it occupies in the CPU (PCI) address space, as some GPUs appear
 * to have issues when VRAM is reprogrammed to a different address.
 *
 * If there is not enough space to fit the non-visible VRAM after the
 * aperture, the VRAM size is limited to the aperture.
 *
 * When AGP is in use, VRAM is placed adjacent to the AGP aperture, since
 * the two must form one contiguous range from the GPU's point of view so
 * that the GPU can be programmed to catch accesses outside of them.
 *
 * This function never fails; in the worst case it limits VRAM or GTT.
 *
 * Note: on AGP platforms, GTT start, end, and size must be initialized
 * before calling this function.
 */
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
			 mc->mc_vram_size >> 20, mc->vram_start,
			 mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP)
			base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
		radeon_vram_location(rdev, &rdev->mc, base);
		radeon_gtt_location(rdev, mc);
	}
}

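/* Gather VRAM information (type, channel width and count, PCI aperture,
 * and sizes from CONFIG_MEMSIZE) and lay out the VRAM/GTT address ranges. */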
int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	radeon_update_bandwidth_info(rdev);
	return 0;
}

/* We don't check whether the GPU actually needs a reset; we simply do the
 * reset. It's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 srbm_reset = 0;
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		 RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		 RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		 RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
	/* Check if any of the rendering blocks is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
		udelay(50);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	/* Reset other GPU blocks if necessary */
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
	if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_IH(1);
	if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
	if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
	if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
	dev_info(rdev->dev, "  R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		 RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		 RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		 RREG32(R_000E50_SRBM_STATUS));
	/* After reset we need to reinit the asic as the GPU often ends up in
	 * an incoherent state.
	 */
	atom_asic_init(rdev->mode_info.atom_context);
	rv515_mc_resume(rdev, &save);
	return 0;
}

int r600_gpu_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}

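/* Build the 2-bit-per-pipe map that routes each tile pipe to an enabled
 * render backend, walking the pipe swizzle order and skipping backends
 * named in the disable mask. */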
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}

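/* Simple population count: number of bits set in val. */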
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}

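/* One-time setup of the 3D engine: per-family pipe/SIMD/backend limits,
 * tiling configuration, the SQ resource split between shader types, and
 * assorted default state registers. */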
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 backend_map;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	rdev->config.r600.tiling_group_size = 256;
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
							(R6XX_MAX_BACKENDS -
							 r600_count_pipe_bits((cc_rb_backend_disable &
									       R6XX_MAX_BACKENDS_MASK) >> 16)),
							(cc_rb_backend_disable >> 16));

	tiling_config |= BACKEND_MAP(backend_map);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	/* Setup pipes */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if (rdev->family > CHIP_R600)
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256) {
		tmp = 256;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}

/*
 * Indirect registers accessor
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}

/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}

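/* Fetch the PFP, ME and RLC microcode images for this ASIC family via
 * request_firmware() and sanity-check their sizes; on failure, all
 * firmware references are dropped. */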
int r600_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		break;
	default: BUG();
	}

	if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
	}
	return err;
}

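/* Upload the ME and PFP microcode into the CP with the CP halted and
 * freshly soft-reset. */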
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

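/* Issue the ME_INITIALIZE packet that brings the CP out of reset and
 * then un-halt the micro engine. */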
int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family < CHIP_RV770) {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}

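/* (Re)program the ring buffer registers (size, read/write pointers and
 * base address), restart the CP and verify it with a ring test. */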
int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}

void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}

void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}

void r600_cp_fini(struct radeon_device *rdev)
{
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev);
}

/*
 * GPU scratch registers helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
	}
}

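/* Basic sanity test of the CP: write 0xDEADBEEF to a scratch register
 * through the ring and poll until the value shows up. */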
int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r600_wb_disable(struct radeon_device *rdev)
{
	int r;

	WREG32(SCRATCH_UMSK, 0);
	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
}

void r600_wb_fini(struct radeon_device *rdev)
{
	r600_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

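/* Allocate, pin and map the writeback buffer in GTT on first use, then
 * point the scratch and ring read-pointer writeback addresses at it. */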
1750int r600_wb_enable(struct radeon_device *rdev)
3ce0a23d
JG
1751{
1752 int r;
1753
1754 if (rdev->wb.wb_obj == NULL) {
4c788679
JG
1755 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
1756 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
3ce0a23d 1757 if (r) {
4c788679 1758 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
3ce0a23d
JG
1759 return r;
1760 }
4c788679
JG
1761 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
1762 if (unlikely(r != 0)) {
1763 r600_wb_fini(rdev);
3ce0a23d
JG
1764 return r;
1765 }
4c788679 1766 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
81cc35bf 1767 &rdev->wb.gpu_addr);
3ce0a23d 1768 if (r) {
4c788679
JG
1769 radeon_bo_unreserve(rdev->wb.wb_obj);
1770 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
81cc35bf 1771 r600_wb_fini(rdev);
3ce0a23d
JG
1772 return r;
1773 }
4c788679
JG
1774 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
1775 radeon_bo_unreserve(rdev->wb.wb_obj);
3ce0a23d 1776 if (r) {
4c788679 1777 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
81cc35bf 1778 r600_wb_fini(rdev);
3ce0a23d
JG
1779 return r;
1780 }
1781 }
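	/* Program the write-back addresses: scratch register write-back
	 * goes to the start of the WB page, the CP rptr copy to offset
	 * 1024, and SCRATCH_UMSK = 0xff enables write-back for the
	 * scratch registers.
	 */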
	WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
	WREG32(SCRATCH_UMSK, 0xff);
	return 0;
}

void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Also consider EVENT_WRITE_EOP; it handles interrupts, timestamps and events */

	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, fence->seq);
	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
	radeon_ring_write(rdev, RB_INT_STAT);
}

int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_pages, struct radeon_fence *fence)
{
	int r;

	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
	if (r) {
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
		return r;
	}
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
	r600_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);
	return 0;
}

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}

bool r600_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	reg = RREG32(D1CRTC_CONTROL) |
		RREG32(D2CRTC_CONTROL);
	if (reg & CRTC_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}

int r600_startup(struct radeon_device *rdev)
{
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);
	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "blitter init failed (%d), falling back to memcpy\n", r);
	}
	/* pin copy shader into vram */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
				  &rdev->r600_blit.shader_gpu_addr);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
			return r;
		}
	}
	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* the write-back buffer is not vital, so don't worry about failure */
	r600_wb_enable(rdev);
	return 0;
}

void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1 << 0);
		temp |= (1 << 1);
	} else {
		temp &= ~(1 << 1);
	}
	WREG32(CONFIG_CNTL, temp);
}

int r600_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
	 * posting performs the tasks needed to bring the GPU back into
	 * good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);
	if (r)
		return r;

	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio resume failed\n");
		return r;
	}

	return r;
}

int r600_suspend(struct radeon_device *rdev)
{
	int r;

	r600_audio_fini(rdev);
	/* FIXME: we should wait for ring to be empty */
	r600_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_irq_suspend(rdev);
	r600_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	/* unpin shader bo */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (!r) {
			radeon_bo_unpin(rdev->r600_blit.shader_obj);
			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		}
	}
	return 0;
}

/* The plan is to move initialization into this function and to use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than call asic-specific functions. This should also
 * allow us to remove a bunch of callback functions like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc!\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!r600_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	/* Initialize power management */
	radeon_pm_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_cp_fini(rdev);
		r600_wb_fini(rdev);
		r600_irq_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
			rdev->accel_working = false;
		} else {
			r = r600_ib_test(rdev);
			if (r) {
				dev_err(rdev->dev, "IB test failed (%d).\n", r);
				rdev->accel_working = false;
			}
		}
	}

	r = r600_audio_init(rdev);
	if (r)
		return r; /* TODO error handling */
	return 0;
}

void r600_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r600_cp_fini(rdev);
	r600_wb_fini(rdev);
	r600_irq_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
	radeon_dummy_page_fini(rdev);
}

/*
 * CS stuff
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* FIXME: implement */
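	/* the packet payload below is the dword-aligned low 32 bits of the
	 * IB address, the high 8 address bits, and the IB length in dwords
	 */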
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}

int r600_ib_test(struct radeon_device *rdev)
{
	struct radeon_ib *ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, &ib);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}
	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib->ptr[2] = 0xDEADBEEF;
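	/* pad the remainder of the 16-dword IB with type-2 NOP packets */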
	ib->ptr[3] = PACKET2(0);
	ib->ptr[4] = PACKET2(0);
	ib->ptr[5] = PACKET2(0);
	ib->ptr[6] = PACKET2(0);
	ib->ptr[7] = PACKET2(0);
	ib->ptr[8] = PACKET2(0);
	ib->ptr[9] = PACKET2(0);
	ib->ptr[10] = PACKET2(0);
	ib->ptr[11] = PACKET2(0);
	ib->ptr[12] = PACKET2(0);
	ib->ptr[13] = PACKET2(0);
	ib->ptr[14] = PACKET2(0);
	ib->ptr[15] = PACKET2(0);
	ib->length_dw = 16;
	r = radeon_ib_schedule(rdev, ib);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib->fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}

/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works much
 * the same as the CP ring buffer, but in reverse. Rather than the CPU
 * writing to the ring and the GPU consuming, the GPU writes to the ring
 * and the host consumes. As the host irq handler processes interrupts, it
 * increments the rptr. When the rptr catches up with the wptr, all the
 * current interrupts have been processed.
 */

void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
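	/* Worked example, again assuming drm_order() rounds up: the
	 * 64 KiB ring requested from r600_init() gives
	 * rb_bufsz = drm_order(16384) = 14, so ring_size stays 64 KiB
	 * and ptr_mask below becomes 0xFFFF.
	 */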
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
	rdev->ih.rptr = 0;
}

static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
				     true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}

static void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}

static void r600_rlc_stop(struct radeon_device *rdev)
{
	if (rdev->family >= CHIP_RV770) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		udelay(15000);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}

static int r600_rlc_init(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_CNTL, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

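	/* upload the big-endian ucode image one dword at a time through
	 * the RLC_UCODE_ADDR/RLC_UCODE_DATA register pair, then leave
	 * the address register pointing back at dword 0
	 */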
	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}

static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

static void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.wptr = 0;
	rdev->ih.rptr = 0;
}

static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, 0);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
	}
}

int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	ret = r600_rlc_init(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));
	/* WPTR writeback, not yet */
	/*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
	WREG32(IH_RB_WPTR_ADDR_LO, 0);
	WREG32(IH_RB_WPTR_ADDR_HI, 0);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;

#ifdef __BIG_ENDIAN
	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
#endif
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	r600_disable_interrupt_state(rdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}

void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}

int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

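	/* start from the current HPD control values with the enable bit
	 * cleared, so only the sources requested below get re-enabled
	 */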
	if (ASIC_IS_DCE3(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		}
	} else {
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	if (rdev->irq.sw_int) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
		}
	} else {
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
	}

	return 0;
}

static inline void r600_irq_ack(struct radeon_device *rdev,
				u32 *disp_int,
				u32 *disp_int_cont,
				u32 *disp_int_cont2)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		*disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		*disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		*disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
	} else {
		*disp_int = RREG32(DISP_INTERRUPT_STATUS);
		*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		*disp_int_cont2 = 0;
	}

	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (*disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (*disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (*disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (*disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (*disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (*disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (*disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	}
}

void r600_irq_disable(struct radeon_device *rdev)
{
	u32 disp_int, disp_int_cont, disp_int_cont2;

	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
	r600_disable_interrupt_state(rdev);
}

static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	/* XXX use writeback */
	wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing from
		 * the first vector that has not been overwritten (wptr + 16).
		 * Hopefully this allows us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}

/* r600 IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 Vblank
 *      1         1  D1 Vline
 *      5         0  D2 Vblank
 *      5         1  D2 Vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
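/* For example, a D1 vblank arrives as an entry whose first dword has
 * 0x1 in its low byte (src_id = 1) and whose second dword is 0
 * (src_data = 0); r600_irq_process() below masks out exactly these
 * two fields.
 */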

int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr = r600_get_ih_wptr(rdev);
	u32 rptr = rdev->ih.rptr;
	u32 src_id, src_data;
	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
	unsigned long flags;
	bool queue_hotplug = false;

	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
	if (!rdev->ih.enabled)
		return IRQ_NONE;

	spin_lock_irqsave(&rdev->ih.lock, flags);

	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}

restart_ih:
	/* display interrupts */
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = rdev->ih.ring[ring_index] & 0xff;
		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 0);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (disp_int & LB_D1_VLINE_INTERRUPT) {
					disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (disp_int & LB_D2_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 1);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
					disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (disp_int & LB_D2_VLINE_INTERRUPT) {
					disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (disp_int & DC_HPD1_INTERRUPT) {
					disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (disp_int & DC_HPD2_INTERRUPT) {
					disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (disp_int_cont & DC_HPD3_INTERRUPT) {
					disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (disp_int_cont & DC_HPD4_INTERRUPT) {
					disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	if (queue_hotplug)
		queue_work(rdev->wq, &rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = rdev->cp.rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
		i = (i + 1) & rdev->cp.ptr_mask;
	}
	return 0;
}

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}

/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * @rdev: radeon device structure
 * @bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX chips don't seem to take into account an HDP flush
 * performed through the ring buffer; this leads to corruption in
 * rendering, see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
 * To avoid it we perform the HDP flush directly by writing the
 * register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}