/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu-44x.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)

#define VCPU_GPR(n) (VCPU_GPRS + (n * 4))

/* The host stack layout: */
#define HOST_R1 0 /* Implied by stwu. */
#define HOST_CALLEE_LR 4
#define HOST_RUN 8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2 12
#define HOST_NV_GPRS 16
#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */

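/* Worked example of the layout arithmetic above: HOST_NV_GPR(31) is
 * 16 + (31 - 14) * 4 = 84, so HOST_MIN_STACK_SIZE is 88 and
 * HOST_STACK_SIZE rounds up to the next 16-byte boundary, 96. */
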
#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_DEBUG))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS))

#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_PROGRAM) | \
                       (1<<BOOKE_INTERRUPT_DTLB_MISS))

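/* The masks above are indexed by exit number: kvmppc_resume_host computes
 * (1 << exit_nr) and tests it against them to decide which extra state
 * (faulting instruction, DEAR, ESR) must be captured before the host can
 * clobber it. */
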
.macro KVM_HANDLER ivor_nr
_GLOBAL(kvmppc_handler_\ivor_nr)
        /* Get pointer to vcpu and record exit number. */
        mtspr SPRN_SPRG_WSCRATCH0, r4
        mfspr r4, SPRN_SPRG_RVCPU
        stw r5, VCPU_GPR(r5)(r4)
        stw r6, VCPU_GPR(r6)(r4)
        mfctr r5
        lis r6, kvmppc_resume_host@h
        stw r5, VCPU_CTR(r4)
        li r5, \ivor_nr
        ori r6, r6, kvmppc_resume_host@l
        mtctr r6
        bctr
.endm
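
/* Each stub generated by KVM_HANDLER is the same length; kvmppc_handler_len
 * (below) measures that stride. The stubs run from the area named by the
 * guest IVPR (loaded with kvmppc_booke_handlers on entry), so they likely
 * branch through CTR because an absolute target survives being copied,
 * where a relative branch would not. Guest CTR is saved to the vcpu first,
 * since the stub clobbers it. */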

_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK
KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE
KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE
KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL
KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT
KVM_HANDLER BOOKE_INTERRUPT_PROGRAM
KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL
KVM_HANDLER BOOKE_INTERRUPT_SYSCALL
KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL
KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER
KVM_HANDLER BOOKE_INTERRUPT_FIT
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS
KVM_HANDLER BOOKE_INTERRUPT_DEBUG
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND

_GLOBAL(kvmppc_handler_len)
        .long kvmppc_handler_1 - kvmppc_handler_0


/* Registers:
 * SPRG_SCRATCH0: guest r4
 * r4: vcpu pointer
 * r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
        stw r3, VCPU_GPR(r3)(r4)
        mfcr r3
        stw r3, VCPU_CR(r4)
        stw r7, VCPU_GPR(r7)(r4)
        stw r8, VCPU_GPR(r8)(r4)
        stw r9, VCPU_GPR(r9)(r4)

        li r6, 1
        slw r6, r6, r5

#ifdef CONFIG_KVM_EXIT_TIMING
        /* save exit time */
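        /* The 64-bit timebase is read as two 32-bit halves; if TBU changed
         * while TBL was being read, the low half wrapped and the loop
         * retries to get a coherent snapshot. */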
1:
        mfspr r7, SPRN_TBRU
        mfspr r8, SPRN_TBRL
        mfspr r9, SPRN_TBRU
        cmpw r9, r7
        bne 1b
        stw r8, VCPU_TIMING_EXIT_TBL(r4)
        stw r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

        /* Save the faulting instruction and all GPRs for emulation. */
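        /* SRR0 is the guest PC, an effective address in the guest's address
         * space (the guest runs with MSR[IS]/[DS] set). Setting MSR[DS]
         * around the load should make the fetch translate through the
         * guest's shadow TLB mappings rather than the host's. */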
        andi. r7, r6, NEED_INST_MASK
        beq ..skip_inst_copy
        mfspr r9, SPRN_SRR0
        mfmsr r8
        ori r7, r8, MSR_DS
        mtmsr r7
        isync
        lwz r9, 0(r9)
        mtmsr r8
        isync
        stw r9, VCPU_LAST_INST(r4)

        stw r15, VCPU_GPR(r15)(r4)
        stw r16, VCPU_GPR(r16)(r4)
        stw r17, VCPU_GPR(r17)(r4)
        stw r18, VCPU_GPR(r18)(r4)
        stw r19, VCPU_GPR(r19)(r4)
        stw r20, VCPU_GPR(r20)(r4)
        stw r21, VCPU_GPR(r21)(r4)
        stw r22, VCPU_GPR(r22)(r4)
        stw r23, VCPU_GPR(r23)(r4)
        stw r24, VCPU_GPR(r24)(r4)
        stw r25, VCPU_GPR(r25)(r4)
        stw r26, VCPU_GPR(r26)(r4)
        stw r27, VCPU_GPR(r27)(r4)
        stw r28, VCPU_GPR(r28)(r4)
        stw r29, VCPU_GPR(r29)(r4)
        stw r30, VCPU_GPR(r30)(r4)
        stw r31, VCPU_GPR(r31)(r4)
..skip_inst_copy:

        /* Also grab DEAR and ESR before the host can clobber them. */

        andi. r7, r6, NEED_DEAR_MASK
        beq ..skip_dear
        mfspr r9, SPRN_DEAR
        stw r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

        andi. r7, r6, NEED_ESR_MASK
        beq ..skip_esr
        mfspr r9, SPRN_ESR
        stw r9, VCPU_FAULT_ESR(r4)
..skip_esr:

        /* Save remaining volatile guest register state to vcpu. */
        stw r0, VCPU_GPR(r0)(r4)
        stw r1, VCPU_GPR(r1)(r4)
        stw r2, VCPU_GPR(r2)(r4)
        stw r10, VCPU_GPR(r10)(r4)
        stw r11, VCPU_GPR(r11)(r4)
        stw r12, VCPU_GPR(r12)(r4)
        stw r13, VCPU_GPR(r13)(r4)
        stw r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
        mflr r3
        stw r3, VCPU_LR(r4)
        mfxer r3
        stw r3, VCPU_XER(r4)
        mfspr r3, SPRN_SPRG_RSCRATCH0
        stw r3, VCPU_GPR(r4)(r4)
        mfspr r3, SPRN_SRR0
        stw r3, VCPU_PC(r4)

        /* Restore host stack pointer and PID before IVPR, since the host
         * exception handlers use them. */
        lwz r1, VCPU_HOST_STACK(r4)
        lwz r3, VCPU_HOST_PID(r4)
        mtspr SPRN_PID, r3

        /* Restore host IVPR before re-enabling interrupts. We cheat and know
         * that Linux IVPR is always 0xc0000000. */
        lis r3, 0xc000
        mtspr SPRN_IVPR, r3

        /* Switch to kernel stack and jump to handler. */
        LOAD_REG_ADDR(r3, kvmppc_handle_exit)
        mtctr r3
        lwz r3, HOST_RUN(r1)
        lwz r2, HOST_R2(r1)
        mr r14, r4 /* Save vcpu pointer. */

        bctrl /* kvmppc_handle_exit() */

        /* Restore vcpu pointer and the nonvolatiles we used. */
        mr r4, r14
        lwz r14, VCPU_GPR(r14)(r4)

        /* Sometimes instruction emulation must restore complete GPR state. */
        andi. r5, r3, RESUME_FLAG_NV
        beq ..skip_nv_load
        lwz r15, VCPU_GPR(r15)(r4)
        lwz r16, VCPU_GPR(r16)(r4)
        lwz r17, VCPU_GPR(r17)(r4)
        lwz r18, VCPU_GPR(r18)(r4)
        lwz r19, VCPU_GPR(r19)(r4)
        lwz r20, VCPU_GPR(r20)(r4)
        lwz r21, VCPU_GPR(r21)(r4)
        lwz r22, VCPU_GPR(r22)(r4)
        lwz r23, VCPU_GPR(r23)(r4)
        lwz r24, VCPU_GPR(r24)(r4)
        lwz r25, VCPU_GPR(r25)(r4)
        lwz r26, VCPU_GPR(r26)(r4)
        lwz r27, VCPU_GPR(r27)(r4)
        lwz r28, VCPU_GPR(r28)(r4)
        lwz r29, VCPU_GPR(r29)(r4)
        lwz r30, VCPU_GPR(r30)(r4)
        lwz r31, VCPU_GPR(r31)(r4)
..skip_nv_load:

        /* Should we return to the guest? */
        andi. r5, r3, RESUME_FLAG_HOST
        beq lightweight_exit

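        /* kvmppc_handle_exit() returns RESUME_* flags in the low two bits of
         * its return value; the remaining bits carry a negative errno shifted
         * left by two, which the arithmetic shift below recovers. */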
        srawi r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
        /* Not returning to guest. */

        /* We already saved guest volatile register state; now save the
         * non-volatiles. */
        stw r15, VCPU_GPR(r15)(r4)
        stw r16, VCPU_GPR(r16)(r4)
        stw r17, VCPU_GPR(r17)(r4)
        stw r18, VCPU_GPR(r18)(r4)
        stw r19, VCPU_GPR(r19)(r4)
        stw r20, VCPU_GPR(r20)(r4)
        stw r21, VCPU_GPR(r21)(r4)
        stw r22, VCPU_GPR(r22)(r4)
        stw r23, VCPU_GPR(r23)(r4)
        stw r24, VCPU_GPR(r24)(r4)
        stw r25, VCPU_GPR(r25)(r4)
        stw r26, VCPU_GPR(r26)(r4)
        stw r27, VCPU_GPR(r27)(r4)
        stw r28, VCPU_GPR(r28)(r4)
        stw r29, VCPU_GPR(r29)(r4)
        stw r30, VCPU_GPR(r30)(r4)
        stw r31, VCPU_GPR(r31)(r4)

        /* Load host non-volatile register state from host stack. */
        lwz r14, HOST_NV_GPR(r14)(r1)
        lwz r15, HOST_NV_GPR(r15)(r1)
        lwz r16, HOST_NV_GPR(r16)(r1)
        lwz r17, HOST_NV_GPR(r17)(r1)
        lwz r18, HOST_NV_GPR(r18)(r1)
        lwz r19, HOST_NV_GPR(r19)(r1)
        lwz r20, HOST_NV_GPR(r20)(r1)
        lwz r21, HOST_NV_GPR(r21)(r1)
        lwz r22, HOST_NV_GPR(r22)(r1)
        lwz r23, HOST_NV_GPR(r23)(r1)
        lwz r24, HOST_NV_GPR(r24)(r1)
        lwz r25, HOST_NV_GPR(r25)(r1)
        lwz r26, HOST_NV_GPR(r26)(r1)
        lwz r27, HOST_NV_GPR(r27)(r1)
        lwz r28, HOST_NV_GPR(r28)(r1)
        lwz r29, HOST_NV_GPR(r29)(r1)
        lwz r30, HOST_NV_GPR(r30)(r1)
        lwz r31, HOST_NV_GPR(r31)(r1)

        /* Return to kvm_vcpu_run(). */
        lwz r4, HOST_STACK_LR(r1)
        addi r1, r1, HOST_STACK_SIZE
        mtlr r4
        /* r3 still contains the return code from kvmppc_handle_exit(). */
        blr


/* Registers:
 * r3: kvm_run pointer
 * r4: vcpu pointer
 */
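/* Saves host state to its own stack frame, loads the guest's non-volatile
 * registers, then falls through to lightweight_exit to enter the guest. */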
_GLOBAL(__kvmppc_vcpu_run)
        stwu r1, -HOST_STACK_SIZE(r1)
        stw r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */

        /* Save host state to stack. */
        stw r3, HOST_RUN(r1)
        mflr r3
        stw r3, HOST_STACK_LR(r1)

        /* Save host non-volatile register state to stack. */
        stw r14, HOST_NV_GPR(r14)(r1)
        stw r15, HOST_NV_GPR(r15)(r1)
        stw r16, HOST_NV_GPR(r16)(r1)
        stw r17, HOST_NV_GPR(r17)(r1)
        stw r18, HOST_NV_GPR(r18)(r1)
        stw r19, HOST_NV_GPR(r19)(r1)
        stw r20, HOST_NV_GPR(r20)(r1)
        stw r21, HOST_NV_GPR(r21)(r1)
        stw r22, HOST_NV_GPR(r22)(r1)
        stw r23, HOST_NV_GPR(r23)(r1)
        stw r24, HOST_NV_GPR(r24)(r1)
        stw r25, HOST_NV_GPR(r25)(r1)
        stw r26, HOST_NV_GPR(r26)(r1)
        stw r27, HOST_NV_GPR(r27)(r1)
        stw r28, HOST_NV_GPR(r28)(r1)
        stw r29, HOST_NV_GPR(r29)(r1)
        stw r30, HOST_NV_GPR(r30)(r1)
        stw r31, HOST_NV_GPR(r31)(r1)

        /* Load guest non-volatiles. */
        lwz r14, VCPU_GPR(r14)(r4)
        lwz r15, VCPU_GPR(r15)(r4)
        lwz r16, VCPU_GPR(r16)(r4)
        lwz r17, VCPU_GPR(r17)(r4)
        lwz r18, VCPU_GPR(r18)(r4)
        lwz r19, VCPU_GPR(r19)(r4)
        lwz r20, VCPU_GPR(r20)(r4)
        lwz r21, VCPU_GPR(r21)(r4)
        lwz r22, VCPU_GPR(r22)(r4)
        lwz r23, VCPU_GPR(r23)(r4)
        lwz r24, VCPU_GPR(r24)(r4)
        lwz r25, VCPU_GPR(r25)(r4)
        lwz r26, VCPU_GPR(r26)(r4)
        lwz r27, VCPU_GPR(r27)(r4)
        lwz r28, VCPU_GPR(r28)(r4)
        lwz r29, VCPU_GPR(r29)(r4)
        lwz r30, VCPU_GPR(r30)(r4)
        lwz r31, VCPU_GPR(r31)(r4)

lightweight_exit:
        stw r2, HOST_R2(r1)

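        /* Switch the translation context: save the host PID and load the
         * shadow PID, which tags the guest's shadow TLB entries. */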
        mfspr r3, SPRN_PID
        stw r3, VCPU_HOST_PID(r4)
        lwz r3, VCPU_SHADOW_PID(r4)
        mtspr SPRN_PID, r3

#ifdef CONFIG_44x
        iccci 0, 0 /* XXX hack */
#endif

        /* Load some guest volatiles. */
        lwz r0, VCPU_GPR(r0)(r4)
        lwz r2, VCPU_GPR(r2)(r4)
        lwz r9, VCPU_GPR(r9)(r4)
        lwz r10, VCPU_GPR(r10)(r4)
        lwz r11, VCPU_GPR(r11)(r4)
        lwz r12, VCPU_GPR(r12)(r4)
        lwz r13, VCPU_GPR(r13)(r4)
        lwz r3, VCPU_LR(r4)
        mtlr r3
        lwz r3, VCPU_XER(r4)
        mtxer r3

        /* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
         * so how do we make sure vcpu won't fault? */
        lis r8, kvmppc_booke_handlers@ha
        lwz r8, kvmppc_booke_handlers@l(r8)
        mtspr SPRN_IVPR, r8

        /* Save vcpu pointer for the exception handlers. */
        mtspr SPRN_SPRG_WVCPU, r4

        /* Can't switch the stack pointer until after IVPR is switched,
         * because host interrupt handlers would get confused. */
        lwz r1, VCPU_GPR(r1)(r4)

        /* XXX handle USPRG0 */
        /* Host interrupt handlers may have clobbered these guest-readable
         * SPRGs, so we need to reload them here with the guest's values. */
        lwz r3, VCPU_SPRG4(r4)
        mtspr SPRN_SPRG4W, r3
        lwz r3, VCPU_SPRG5(r4)
        mtspr SPRN_SPRG5W, r3
        lwz r3, VCPU_SPRG6(r4)
        mtspr SPRN_SPRG6W, r3
        lwz r3, VCPU_SPRG7(r4)
        mtspr SPRN_SPRG7W, r3

#ifdef CONFIG_KVM_EXIT_TIMING
        /* save enter time */
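        /* Same carry-safe 64-bit timebase read as on the exit path above. */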
1:
        mfspr r6, SPRN_TBRU
        mfspr r7, SPRN_TBRL
        mfspr r8, SPRN_TBRU
        cmpw r8, r6
        bne 1b
        stw r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
        stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

        /* Finish loading guest volatiles and jump to guest. */
        lwz r3, VCPU_CTR(r4)
        mtctr r3
        lwz r3, VCPU_CR(r4)
        mtcr r3
        lwz r5, VCPU_GPR(r5)(r4)
        lwz r6, VCPU_GPR(r6)(r4)
        lwz r7, VCPU_GPR(r7)(r4)
        lwz r8, VCPU_GPR(r8)(r4)
        lwz r3, VCPU_PC(r4)
        mtsrr0 r3
        lwz r3, VCPU_SHARED(r4)
        lwz r3, (VCPU_SHARED_MSR + 4)(r3)
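        /* The +4 offset selects the low word of the 64-bit shared MSR
         * (big-endian). OR-ing in KVMPPC_MSR_MASK forces on the bits the
         * guest must always run with: interrupts enabled (EE/CE), machine
         * check and debug enabled (ME/DE), problem state (PR), and the
         * guest address space (IS/DS). */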
        oris r3, r3, KVMPPC_MSR_MASK@h
        ori r3, r3, KVMPPC_MSR_MASK@l
        mtsrr1 r3

        /* Clear any debug events which occurred since we disabled MSR[DE].
         * XXX This gives us a 3-instruction window in which a breakpoint
         * intended for guest context could fire in the host instead. */
        lis r3, 0xffff
        ori r3, r3, 0xffff
        mtspr SPRN_DBSR, r3

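        /* The rfi below loads SRR0 into the PC and SRR1 into the MSR,
         * completing the switch into guest context. */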
        lwz r3, VCPU_GPR(r3)(r4)
        lwz r4, VCPU_GPR(r4)(r4)
        rfi