]>
Commit | Line | Data |
---|---|---|
771fe6b9 JG |
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. | |
3 | * Copyright 2008 Red Hat Inc. | |
4 | * Copyright 2009 Jerome Glisse. | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the "Software"), | |
8 | * to deal in the Software without restriction, including without limitation | |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
10 | * and/or sell copies of the Software, and to permit persons to whom the | |
11 | * Software is furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
22 | * OTHER DEALINGS IN THE SOFTWARE. | |
23 | * | |
24 | * Authors: Dave Airlie | |
25 | * Alex Deucher | |
26 | * Jerome Glisse | |
27 | */ | |
28 | #include "drmP.h" | |
29 | #include "radeon_reg.h" | |
30 | #include "radeon.h" | |
31 | ||
3f7dc91a DA |
32 | #include "rs600_reg_safe.h" |
33 | ||
771fe6b9 JG |
34 | /* rs600 depends on : */ |
35 | void r100_hdp_reset(struct radeon_device *rdev); | |
36 | int r100_gui_wait_for_idle(struct radeon_device *rdev); | |
37 | int r300_mc_wait_for_idle(struct radeon_device *rdev); | |
38 | void r420_pipes_init(struct radeon_device *rdev); | |
39 | ||
40 | /* This files gather functions specifics to : | |
41 | * rs600 | |
42 | * | |
43 | * Some of these functions might be used by newer ASICs. | |
44 | */ | |
45 | void rs600_gpu_init(struct radeon_device *rdev); | |
46 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); | |
771fe6b9 JG |
47 | |
48 | ||
49 | /* | |
50 | * GART. | |
51 | */ | |
/*
 * Flush the RS600 GART TLBs.
 *
 * Toggles the L1-TLB/L2-cache invalidate bits in MC_PT0_CNTL through a
 * clear -> set -> clear sequence; the invalidate presumably triggers on
 * the 0->1 edge of those bits (clear-first guarantees the edge even if a
 * previous flush left them set) -- TODO confirm against RS600 MC docs.
 */
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* Make sure the invalidate bits start out low. */
	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
	tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
	WREG32_MC(RS600_MC_PT0_CNTL, tmp);

	/* Raise both bits: invalidate all L1 TLBs and the L2 cache. */
	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
	tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
	WREG32_MC(RS600_MC_PT0_CNTL, tmp);

	/* Drop the bits again so the next flush sees a fresh edge. */
	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
	tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
	WREG32_MC(RS600_MC_PT0_CNTL, tmp);
	/* Read back to ensure the preceding writes have posted. */
	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
}
69 | ||
4aac0473 | 70 | int rs600_gart_init(struct radeon_device *rdev) |
771fe6b9 | 71 | { |
771fe6b9 JG |
72 | int r; |
73 | ||
4aac0473 JG |
74 | if (rdev->gart.table.vram.robj) { |
75 | WARN(1, "RS600 GART already initialized.\n"); | |
76 | return 0; | |
77 | } | |
771fe6b9 JG |
78 | /* Initialize common gart structure */ |
79 | r = radeon_gart_init(rdev); | |
80 | if (r) { | |
81 | return r; | |
82 | } | |
83 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 8; | |
4aac0473 JG |
84 | return radeon_gart_table_vram_alloc(rdev); |
85 | } | |
86 | ||
/*
 * Program and enable the RS600 GART.
 *
 * Requires rs600_gart_init() to have allocated the VRAM table object.
 * Pins the table, programs the MC page-table client/aperture/context
 * registers, then turns on translation and flushes the TLBs.
 *
 * Returns 0 on success, -EINVAL if the table object is missing, or the
 * error from pinning the table in VRAM.
 */
int rs600_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* FIXME: setup default page */
	WREG32_MC(RS600_MC_PT0_CNTL,
		  (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
		   RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));
	/* Program all 19 MC clients with identical translation settings.
	 * NOTE(review): assumes client control registers sit at consecutive
	 * indirect addresses from CLIENT0_CNTL -- confirm against MC docs. */
	for (i = 0; i < 19; i++) {
		WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i,
			  (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
			   RS600_SYSTEM_ACCESS_MODE_IN_SYS |
			   RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE |
			   RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
			   RS600_ENABLE_FRAGMENT_PROCESSING |
			   RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));
	}

	/* System context map to GART space */
	WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp);

	/* enable first context: flat mapping covering the whole GTT range */
	WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp);
	WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL,
		  (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT));
	/* disable all other contexts (1..7); assumes their CNTL registers
	 * follow CONTEXT0_CNTL at consecutive indirect addresses */
	for (i = 1; i < 8; i++) {
		WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);
	}

	/* setup the page table: base address of the PTE array in VRAM */
	WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
		  rdev->gart.table_addr);
	WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

	/* enable page tables (read-modify-write to preserve other bits) */
	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
	WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT));
	tmp = RREG32_MC(RS600_MC_CNTL1);
	WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES));
	/* Invalidate any stale TLB entries before declaring the GART ready. */
	rs600_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
143 | ||
144 | void rs600_gart_disable(struct radeon_device *rdev) | |
145 | { | |
146 | uint32_t tmp; | |
147 | ||
148 | /* FIXME: disable out of gart access */ | |
149 | WREG32_MC(RS600_MC_PT0_CNTL, 0); | |
150 | tmp = RREG32_MC(RS600_MC_CNTL1); | |
151 | tmp &= ~RS600_ENABLE_PAGE_TABLES; | |
152 | WREG32_MC(RS600_MC_CNTL1, tmp); | |
4aac0473 JG |
153 | if (rdev->gart.table.vram.robj) { |
154 | radeon_object_kunmap(rdev->gart.table.vram.robj); | |
155 | radeon_object_unpin(rdev->gart.table.vram.robj); | |
156 | } | |
157 | } | |
158 | ||
/*
 * Full GART teardown: disable translation, free the VRAM table object,
 * then release the common GART software state.
 */
void rs600_gart_fini(struct radeon_device *rdev)
{
	rs600_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
165 | ||
166 | #define R600_PTE_VALID (1 << 0) | |
167 | #define R600_PTE_SYSTEM (1 << 1) | |
168 | #define R600_PTE_SNOOPED (1 << 2) | |
169 | #define R600_PTE_READABLE (1 << 5) | |
170 | #define R600_PTE_WRITEABLE (1 << 6) | |
171 | ||
172 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |
173 | { | |
174 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; | |
175 | ||
176 | if (i < 0 || i > rdev->gart.num_gpu_pages) { | |
177 | return -EINVAL; | |
178 | } | |
179 | addr = addr & 0xFFFFFFFFFFFFF000ULL; | |
180 | addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED; | |
181 | addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE; | |
182 | writeq(addr, ((void __iomem *)ptr) + (i * 8)); | |
183 | return 0; | |
184 | } | |
185 | ||
186 | ||
187 | /* | |
188 | * MC. | |
189 | */ | |
/*
 * Quiesce display clients of the memory controller so the MC aperture
 * can be reprogrammed safely: wait for GUI idle, disable VGA rendering,
 * then shut off both VGA engines and both CRTCs.
 */
void rs600_mc_disable_clients(struct radeon_device *rdev)
{
	unsigned tmp;

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	rv515_vga_render_disable(rdev);

	/* Disable VGA mode on both display controllers. */
	tmp = RREG32(AVIVO_D1VGA_CONTROL);
	WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
	tmp = RREG32(AVIVO_D2VGA_CONTROL);
	WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);

	/* Disable both CRTCs. */
	tmp = RREG32(AVIVO_D1CRTC_CONTROL);
	WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
	tmp = RREG32(AVIVO_D2CRTC_CONTROL);
	WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);

	/* make sure all previous write got through */
	tmp = RREG32(AVIVO_D2CRTC_CONTROL);

	mdelay(1);
}
216 | ||
/*
 * Initialize the RS600 memory controller: pick VRAM/GTT locations,
 * enable bus mastering, park the AGP aperture, quiesce display clients,
 * and program the framebuffer location registers.
 *
 * Returns 0 on success or the error from radeon_mc_setup().
 */
int rs600_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}

	rs600_gpu_init(rdev);
	rs600_gart_disable(rdev);

	/* Setup GPU memory space; 0xFFFFFFFF asks radeon_mc_setup() to
	 * choose the placements itself. */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	/* Enable bus master */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* FIXME: What does AGP means for such chipset ? */
	WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF);
	/* FIXME: are this AGP reg in indirect MC range ? */
	WREG32_MC(RS600_MC_AGP_BASE, 0);
	WREG32_MC(RS600_MC_AGP_BASE_2, 0);
	rs600_mc_disable_clients(rdev);
	if (rs600_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	/* FB_LOCATION holds the top and start of VRAM in 64KB units. */
	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32_MC(RS600_MC_FB_LOCATION, tmp);
	/* Keep the HDP's idea of the framebuffer base in sync with the MC. */
	WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
	return 0;
}
258 | ||
/* MC teardown counterpart of rs600_mc_init(); nothing to undo on RS600. */
void rs600_mc_fini(struct radeon_device *rdev)
{
}
262 | ||
263 | ||
7ed220d7 MD |
264 | /* |
265 | * Interrupts | |
266 | */ | |
267 | int rs600_irq_set(struct radeon_device *rdev) | |
268 | { | |
269 | uint32_t tmp = 0; | |
270 | uint32_t mode_int = 0; | |
271 | ||
272 | if (rdev->irq.sw_int) { | |
273 | tmp |= RADEON_SW_INT_ENABLE; | |
274 | } | |
275 | if (rdev->irq.crtc_vblank_int[0]) { | |
7ed220d7 MD |
276 | mode_int |= AVIVO_D1MODE_INT_MASK; |
277 | } | |
278 | if (rdev->irq.crtc_vblank_int[1]) { | |
7ed220d7 MD |
279 | mode_int |= AVIVO_D2MODE_INT_MASK; |
280 | } | |
281 | WREG32(RADEON_GEN_INT_CNTL, tmp); | |
282 | WREG32(AVIVO_DxMODE_INT_MASK, mode_int); | |
283 | return 0; | |
284 | } | |
285 | ||
286 | static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) | |
287 | { | |
288 | uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); | |
289 | uint32_t irq_mask = RADEON_SW_INT_TEST; | |
290 | ||
291 | if (irqs & AVIVO_DISPLAY_INT_STATUS) { | |
292 | *r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS); | |
293 | if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { | |
294 | WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); | |
295 | } | |
296 | if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { | |
297 | WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); | |
298 | } | |
299 | } else { | |
300 | *r500_disp_int = 0; | |
301 | } | |
302 | ||
303 | if (irqs) { | |
304 | WREG32(RADEON_GEN_INT_STATUS, irqs); | |
305 | } | |
306 | return irqs & irq_mask; | |
307 | } | |
308 | ||
309 | int rs600_irq_process(struct radeon_device *rdev) | |
310 | { | |
311 | uint32_t status; | |
312 | uint32_t r500_disp_int; | |
313 | ||
314 | status = rs600_irq_ack(rdev, &r500_disp_int); | |
315 | if (!status && !r500_disp_int) { | |
316 | return IRQ_NONE; | |
317 | } | |
318 | while (status || r500_disp_int) { | |
319 | /* SW interrupt */ | |
320 | if (status & RADEON_SW_INT_TEST) { | |
321 | radeon_fence_process(rdev); | |
322 | } | |
323 | /* Vertical blank interrupts */ | |
324 | if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { | |
325 | drm_handle_vblank(rdev->ddev, 0); | |
326 | } | |
327 | if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { | |
328 | drm_handle_vblank(rdev->ddev, 1); | |
329 | } | |
330 | status = rs600_irq_ack(rdev, &r500_disp_int); | |
331 | } | |
332 | return IRQ_HANDLED; | |
333 | } | |
334 | ||
335 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) | |
336 | { | |
337 | if (crtc == 0) | |
338 | return RREG32(AVIVO_D1CRTC_FRAME_COUNT); | |
339 | else | |
340 | return RREG32(AVIVO_D2CRTC_FRAME_COUNT); | |
341 | } | |
342 | ||
343 | ||
771fe6b9 JG |
344 | /* |
345 | * Global GPU functions | |
346 | */ | |
771fe6b9 JG |
347 | int rs600_mc_wait_for_idle(struct radeon_device *rdev) |
348 | { | |
349 | unsigned i; | |
350 | uint32_t tmp; | |
351 | ||
352 | for (i = 0; i < rdev->usec_timeout; i++) { | |
353 | /* read MC_STATUS */ | |
354 | tmp = RREG32_MC(RS600_MC_STATUS); | |
355 | if (tmp & RS600_MC_STATUS_IDLE) { | |
356 | return 0; | |
357 | } | |
358 | DRM_UDELAY(1); | |
359 | } | |
360 | return -1; | |
361 | } | |
362 | ||
/* Apply chip errata workarounds; RS600 needs no PLL errata handling. */
void rs600_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;
}
367 | ||
/*
 * Basic GPU bring-up: reset HDP, disable VGA rendering, initialize the
 * render pipes, and wait for the memory controller to go idle.
 */
void rs600_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: HDP same place on rs600 ? */
	r100_hdp_reset(rdev);
	rv515_vga_render_disable(rdev);
	/* FIXME: is this correct ? */
	r420_pipes_init(rdev);
	if (rs600_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}
380 | ||
381 | ||
382 | /* | |
383 | * VRAM info. | |
384 | */ | |
/*
 * Fill in static VRAM characteristics. Values are hard-coded defaults
 * rather than probed from hardware (see FIXME below).
 */
void rs600_vram_info(struct radeon_device *rdev)
{
	/* FIXME: to do or is these values sane ? */
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
}
391 | ||
c93bb85b JG |
/* Display-watermark/bandwidth programming; not yet implemented for RS600. */
void rs600_bandwidth_update(struct radeon_device *rdev)
{
	/* FIXME: implement, should this be like rs690 ? */
}
396 | ||
771fe6b9 JG |
397 | |
398 | /* | |
399 | * Indirect registers accessor | |
400 | */ | |
401 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) | |
402 | { | |
403 | uint32_t r; | |
404 | ||
405 | WREG32(RS600_MC_INDEX, | |
406 | ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0)); | |
407 | r = RREG32(RS600_MC_DATA); | |
408 | return r; | |
409 | } | |
410 | ||
411 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | |
412 | { | |
413 | WREG32(RS600_MC_INDEX, | |
414 | RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 | | |
415 | ((reg) & RS600_MC_ADDR_MASK)); | |
416 | WREG32(RS600_MC_DATA, v); | |
417 | } | |
3f7dc91a | 418 | |
/*
 * Point the command-stream checker at the RS600 safe-register bitmap
 * (generated table included from rs600_reg_safe.h). RS600 reuses the
 * r300 config slot for this data.
 */
void rs600_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
}
424 | ||
/*
 * ASIC init entry point for RS600; currently only installs the
 * safe-register bitmap. Always returns 0.
 */
int rs600_init(struct radeon_device *rdev)
{
	rs600_set_safe_registers(rdev);
	return 0;
}