/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_drm.h"
#include "rv770d.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"

static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);

bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;
	/* XXX */
	return connected;
}

void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	/* XXX */
}

void evergreen_hpd_init(struct radeon_device *rdev)
{
	/* XXX */
}


void evergreen_bandwidth_update(struct radeon_device *rdev)
{
	/* XXX */
}

void evergreen_hpd_fini(struct radeon_device *rdev)
{
	/* XXX */
}

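/*
 * Poll SRBM_STATUS for the memory-controller busy bits (mask 0x1F00),
 * waiting up to rdev->usec_timeout microseconds.  Returns 0 once the MC is
 * idle, -1 on timeout.
 */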
static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(SRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

/*
 * GART
 */
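/*
 * Pin the GART page table in VRAM, restore its entries, program the VM L2
 * cache and L1 TLBs, point VM context 0 at the GTT range and the dummy
 * page, then flush the TLB and mark the GART ready.
 */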
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
	evergreen_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}


void evergreen_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

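/*
 * Save the VGA and CRTC control state of all six display controllers and
 * blank them so the memory controller can be reprogrammed without the
 * display engine scanning out of VRAM.
 */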
static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	save->vga_control[0] = RREG32(D1VGA_CONTROL);
	save->vga_control[1] = RREG32(D2VGA_CONTROL);
	save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
	save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
	save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
	save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
	save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
	save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
	save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
	save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
	save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
	save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);

	/* Stop all video */
	WREG32(VGA_RENDER_CONTROL, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);

	WREG32(D1VGA_CONTROL, 0);
	WREG32(D2VGA_CONTROL, 0);
	WREG32(EVERGREEN_D3VGA_CONTROL, 0);
	WREG32(EVERGREEN_D4VGA_CONTROL, 0);
	WREG32(EVERGREEN_D5VGA_CONTROL, 0);
	WREG32(EVERGREEN_D6VGA_CONTROL, 0);
}

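/*
 * Re-point every CRTC's primary/secondary surface and the VGA memory base
 * at the new start of VRAM, then restore the VGA and CRTC state saved by
 * evergreen_mc_stop().
 */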
static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
	/* Unlock host access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	/* Restore video state */
	WREG32(D1VGA_CONTROL, save->vga_control[0]);
	WREG32(D2VGA_CONTROL, save->vga_control[1]);
	WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
	WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
	WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
	WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}

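/*
 * Reprogram the memory controller's system aperture, framebuffer location
 * and AGP aperture (if any) while the displays are stopped, then bring the
 * displays back up and disable the VGA renderer so it cannot overwrite our
 * objects.
 */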
static void evergreen_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lock out access through the VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it from overwriting our objects */
	rv515_vga_render_disable(rdev);
}

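/*
 * CP (command processor) support is not implemented yet; the hooks below
 * are placeholders and are compiled out.
 */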
#if 0
/*
 * CP.
 */
static void evergreen_cp_stop(struct radeon_device *rdev)
{
	/* XXX */
}


static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
	/* XXX */

	return 0;
}


/*
 * Core functions
 */
static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
						  u32 num_backends,
						  u32 backend_disable_mask)
{
	u32 backend_map = 0;

	return backend_map;
}
#endif

static void evergreen_gpu_init(struct radeon_device *rdev)
{
	/* XXX */
}

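/*
 * Read the memory configuration: derive the VRAM bus width from the channel
 * size and channel count, read the VRAM size (in MB) from CONFIG_MEMSIZE,
 * clamp it to the PCI aperture until unmappable VRAM is supported, and let
 * r600_vram_gtt_location() place the VRAM and GTT apertures.
 */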
int evergreen_mc_init(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	/* size in MB on evergreen */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	/* FIXME remove this once we support unmappable VRAM */
	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
		rdev->mc.mc_vram_size = rdev->mc.aper_size;
		rdev->mc.real_vram_size = rdev->mc.aper_size;
	}
	r600_vram_gtt_location(rdev, &rdev->mc);
	/* FIXME: we should enforce default clock in case GPU is not in
	 * default setup
	 */
	a.full = rfixed_const(100);
	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
	return 0;
}

int evergreen_gpu_reset(struct radeon_device *rdev)
{
	/* FIXME: implement for evergreen */
	return 0;
}

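/*
 * Hardware bring-up for evergreen.  Only the memory-controller programming
 * and the (still stubbed) GPU init run for now; firmware loading, GART/AGP
 * setup, the blitter, IRQs, the CP ring and writeback remain compiled out
 * behind #if 0.
 */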
static int evergreen_startup(struct radeon_device *rdev)
{
#if 0
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}
#endif
	evergreen_mc_program(rdev);
#if 0
	if (rdev->flags & RADEON_IS_AGP) {
		evergreen_agp_enable(rdev);
	} else {
		r = evergreen_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
#endif
	evergreen_gpu_init(rdev);
#if 0
	if (!rdev->r600_blit.shader_obj) {
		r = r600_blit_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed blitter (%d).\n", r);
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->r600_blit.shader_gpu_addr);
	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	if (r) {
		DRM_ERROR("failed to pin blit object %d\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = evergreen_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* write back buffers are not vital so don't worry about failure */
	r600_wb_enable(rdev);
#endif
	return 0;
}

int evergreen_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back
	 * into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);
	if (r) {
		return r;
	}

	r = evergreen_startup(rdev);
	if (r) {
		DRM_ERROR("evergreen startup failed on resume\n");
		return r;
	}
#if 0
	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}
#endif
	return r;
}

int evergreen_suspend(struct radeon_device *rdev)
{
#if 0
	int r;

	/* FIXME: we should wait for ring to be empty */
	r700_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_wb_disable(rdev);
	evergreen_pcie_gart_disable(rdev);
	/* unpin shaders bo */
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (likely(r == 0)) {
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	}
#endif
	return 0;
}

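/*
 * The card counts as POSTed if any CRTC has its master enable set, or if
 * the BIOS has already programmed a non-zero memory size into
 * CONFIG_MEMSIZE.
 */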
static bool evergreen_card_posted(struct radeon_device *rdev)
{
	u32 reg;

	/* first check CRTCs */
	reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
	if (reg & EVERGREEN_CRTC_MASTER_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}

/* The plan is to move initialization into this function and use helper
 * functions so that radeon_device_init does little more than call the
 * ASIC-specific functions. This should also allow removing a bunch of
 * callback functions like vram_info.
 */
int evergreen_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!evergreen_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	/* Initialize power management */
	radeon_pm_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
#if 0
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;
#endif
	rdev->accel_working = false;
	r = evergreen_startup(rdev);
	if (r) {
		evergreen_suspend(rdev);
		/*r600_wb_fini(rdev);*/
		/*radeon_ring_fini(rdev);*/
		/*evergreen_pcie_gart_fini(rdev);*/
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			rdev->accel_working = false;
		}
		r = r600_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			rdev->accel_working = false;
		}
	}
	return 0;
}

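/*
 * Teardown, roughly the reverse of evergreen_init(); the acceleration
 * teardown calls stay compiled out to match evergreen_startup().
 */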
void evergreen_fini(struct radeon_device *rdev)
{
	evergreen_suspend(rdev);
#if 0
	r600_blit_fini(rdev);
	r600_irq_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_ring_fini(rdev);
	r600_wb_fini(rdev);
	evergreen_pcie_gart_fini(rdev);
#endif
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
	radeon_dummy_page_fini(rdev);
}
768}