]> bbs.cooldavid.org Git - net-next-2.6.git/blame - arch/powerpc/kvm/emulate.c
KVM: powerpc: Move vector to irqprio resolving to separate function
[net-next-2.6.git] / arch / powerpc / kvm / emulate.c
CommitLineData
bbf45ba5
HB
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/div64.h>
#include "timing.h"
#include "trace.h"

cea5d8c9 34#define OP_TRAP 3
513579e3 35#define OP_TRAP_64 2
cea5d8c9
HB
36
37#define OP_31_XOP_LWZX 23
38#define OP_31_XOP_LBZX 87
39#define OP_31_XOP_STWX 151
40#define OP_31_XOP_STBX 215
41#define OP_31_XOP_STBUX 247
42#define OP_31_XOP_LHZX 279
43#define OP_31_XOP_LHZUX 311
44#define OP_31_XOP_MFSPR 339
45#define OP_31_XOP_STHX 407
46#define OP_31_XOP_STHUX 439
47#define OP_31_XOP_MTSPR 467
48#define OP_31_XOP_DCBI 470
49#define OP_31_XOP_LWBRX 534
50#define OP_31_XOP_TLBSYNC 566
51#define OP_31_XOP_STWBRX 662
52#define OP_31_XOP_LHBRX 790
53#define OP_31_XOP_STHBRX 918
54
55#define OP_LWZ 32
56#define OP_LWZU 33
57#define OP_LBZ 34
58#define OP_LBZU 35
59#define OP_STW 36
60#define OP_STWU 37
61#define OP_STB 38
62#define OP_STBU 39
63#define OP_LHZ 40
64#define OP_LHZU 41
65#define OP_STH 44
66#define OP_STHU 45
67
513579e3
AG
68#ifdef CONFIG_PPC64
69static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
70{
71 return 1;
72}
73#else
74static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
75{
76 return vcpu->arch.tcr & TCR_DIE;
77}
78#endif
79
75f74f0d 80void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
bbf45ba5 81{
544c6761 82 unsigned long dec_nsec;
9a7a9b09 83
544c6761 84 pr_debug("mtDEC: %x\n", vcpu->arch.dec);
513579e3
AG
85#ifdef CONFIG_PPC64
86 /* POWER4+ triggers a dec interrupt if the value is < 0 */
87 if (vcpu->arch.dec & 0x80000000) {
544c6761 88 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
513579e3
AG
89 kvmppc_core_queue_dec(vcpu);
90 return;
91 }
92#endif
93 if (kvmppc_dec_enabled(vcpu)) {
bbf45ba5
HB
94 /* The decrementer ticks at the same rate as the timebase, so
95 * that's how we convert the guest DEC value to the number of
96 * host ticks. */
bbf45ba5 97
544c6761
AG
98 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
99 dec_nsec = vcpu->arch.dec;
100 dec_nsec *= 1000;
101 dec_nsec /= tb_ticks_per_usec;
102 hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
103 HRTIMER_MODE_REL);
513579e3 104 vcpu->arch.dec_jiffies = get_tb();
bbf45ba5 105 } else {
544c6761 106 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
bbf45ba5
HB
107 }
108}
109
bbf45ba5
HB
110/* XXX to do:
111 * lhax
112 * lhaux
113 * lswx
114 * lswi
115 * stswx
116 * stswi
117 * lha
118 * lhau
119 * lmw
120 * stmw
121 *
122 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
123 */
75f74f0d
HB
124/* XXX Should probably auto-generate instruction decoding for a particular core
125 * from opcode tables in the future. */
bbf45ba5
HB
126int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
127{
128 u32 inst = vcpu->arch.last_inst;
129 u32 ea;
130 int ra;
131 int rb;
bbf45ba5
HB
132 int rs;
133 int rt;
134 int sprn;
bbf45ba5
HB
135 enum emulation_result emulated = EMULATE_DONE;
136 int advance = 1;
137
73e75b41
HB
138 /* this default type might be overwritten by subcategories */
139 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
140
513579e3
AG
141 pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
142
bbf45ba5 143 switch (get_op(inst)) {
cea5d8c9 144 case OP_TRAP:
513579e3
AG
145#ifdef CONFIG_PPC64
146 case OP_TRAP_64:
147#else
fcfdbd26 148 vcpu->arch.esr |= ESR_PTR;
513579e3 149#endif
9dd921cf 150 kvmppc_core_queue_program(vcpu);
bbf45ba5
HB
151 advance = 0;
152 break;
153
bbf45ba5
HB
154 case 31:
155 switch (get_xop(inst)) {
156
cea5d8c9 157 case OP_31_XOP_LWZX:
ac3cd34e
HB
158 rt = get_rt(inst);
159 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
160 break;
161
cea5d8c9 162 case OP_31_XOP_LBZX:
bbf45ba5
HB
163 rt = get_rt(inst);
164 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
165 break;
166
cea5d8c9 167 case OP_31_XOP_STWX:
ac3cd34e
HB
168 rs = get_rs(inst);
169 emulated = kvmppc_handle_store(run, vcpu,
170 vcpu->arch.gpr[rs],
171 4, 1);
172 break;
173
cea5d8c9 174 case OP_31_XOP_STBX:
bbf45ba5
HB
175 rs = get_rs(inst);
176 emulated = kvmppc_handle_store(run, vcpu,
177 vcpu->arch.gpr[rs],
178 1, 1);
179 break;
180
cea5d8c9 181 case OP_31_XOP_STBUX:
bbf45ba5
HB
182 rs = get_rs(inst);
183 ra = get_ra(inst);
184 rb = get_rb(inst);
185
186 ea = vcpu->arch.gpr[rb];
187 if (ra)
188 ea += vcpu->arch.gpr[ra];
189
190 emulated = kvmppc_handle_store(run, vcpu,
191 vcpu->arch.gpr[rs],
192 1, 1);
193 vcpu->arch.gpr[rs] = ea;
194 break;
195
cea5d8c9 196 case OP_31_XOP_LHZX:
bbf45ba5
HB
197 rt = get_rt(inst);
198 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
199 break;
200
cea5d8c9 201 case OP_31_XOP_LHZUX:
bbf45ba5
HB
202 rt = get_rt(inst);
203 ra = get_ra(inst);
204 rb = get_rb(inst);
205
206 ea = vcpu->arch.gpr[rb];
207 if (ra)
208 ea += vcpu->arch.gpr[ra];
209
210 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
211 vcpu->arch.gpr[ra] = ea;
212 break;
213
cea5d8c9 214 case OP_31_XOP_MFSPR:
bbf45ba5
HB
215 sprn = get_sprn(inst);
216 rt = get_rt(inst);
217
218 switch (sprn) {
219 case SPRN_SRR0:
220 vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
221 case SPRN_SRR1:
222 vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
bbf45ba5 223 case SPRN_PVR:
513579e3 224 vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
06579dd9 225 case SPRN_PIR:
513579e3
AG
226 vcpu->arch.gpr[rt] = vcpu->vcpu_id; break;
227 case SPRN_MSSSR0:
228 vcpu->arch.gpr[rt] = 0; break;
bbf45ba5
HB
229
230 /* Note: mftb and TBRL/TBWL are user-accessible, so
231 * the guest can always access the real TB anyways.
232 * In fact, we probably will never see these traps. */
233 case SPRN_TBWL:
513579e3 234 vcpu->arch.gpr[rt] = get_tb() >> 32; break;
bbf45ba5 235 case SPRN_TBWU:
513579e3 236 vcpu->arch.gpr[rt] = get_tb(); break;
bbf45ba5
HB
237
238 case SPRN_SPRG0:
239 vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
240 case SPRN_SPRG1:
241 vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
242 case SPRN_SPRG2:
243 vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
244 case SPRN_SPRG3:
245 vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
246 /* Note: SPRG4-7 are user-readable, so we don't get
247 * a trap. */
248
9a7a9b09
AG
249 case SPRN_DEC:
250 {
513579e3 251 u64 jd = get_tb() - vcpu->arch.dec_jiffies;
9a7a9b09 252 vcpu->arch.gpr[rt] = vcpu->arch.dec - jd;
513579e3 253 pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, vcpu->arch.gpr[rt]);
9a7a9b09
AG
254 break;
255 }
bbf45ba5 256 default:
75f74f0d
HB
257 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
258 if (emulated == EMULATE_FAIL) {
259 printk("mfspr: unknown spr %x\n", sprn);
260 vcpu->arch.gpr[rt] = 0;
261 }
bbf45ba5
HB
262 break;
263 }
264 break;
265
cea5d8c9 266 case OP_31_XOP_STHX:
bbf45ba5
HB
267 rs = get_rs(inst);
268 ra = get_ra(inst);
269 rb = get_rb(inst);
270
271 emulated = kvmppc_handle_store(run, vcpu,
272 vcpu->arch.gpr[rs],
273 2, 1);
274 break;
275
cea5d8c9 276 case OP_31_XOP_STHUX:
bbf45ba5
HB
277 rs = get_rs(inst);
278 ra = get_ra(inst);
279 rb = get_rb(inst);
280
281 ea = vcpu->arch.gpr[rb];
282 if (ra)
283 ea += vcpu->arch.gpr[ra];
284
285 emulated = kvmppc_handle_store(run, vcpu,
286 vcpu->arch.gpr[rs],
287 2, 1);
288 vcpu->arch.gpr[ra] = ea;
289 break;
290
cea5d8c9 291 case OP_31_XOP_MTSPR:
bbf45ba5
HB
292 sprn = get_sprn(inst);
293 rs = get_rs(inst);
294 switch (sprn) {
295 case SPRN_SRR0:
296 vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
297 case SPRN_SRR1:
298 vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
bbf45ba5
HB
299
300 /* XXX We need to context-switch the timebase for
301 * watchdog and FIT. */
302 case SPRN_TBWL: break;
303 case SPRN_TBWU: break;
304
513579e3
AG
305 case SPRN_MSSSR0: break;
306
bbf45ba5
HB
307 case SPRN_DEC:
308 vcpu->arch.dec = vcpu->arch.gpr[rs];
309 kvmppc_emulate_dec(vcpu);
310 break;
311
bbf45ba5
HB
312 case SPRN_SPRG0:
313 vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
314 case SPRN_SPRG1:
315 vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
316 case SPRN_SPRG2:
317 vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
318 case SPRN_SPRG3:
319 vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
320
bbf45ba5 321 default:
75f74f0d
HB
322 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
323 if (emulated == EMULATE_FAIL)
324 printk("mtspr: unknown spr %x\n", sprn);
bbf45ba5
HB
325 break;
326 }
327 break;
328
cea5d8c9 329 case OP_31_XOP_DCBI:
bbf45ba5
HB
330 /* Do nothing. The guest is performing dcbi because
331 * hardware DMA is not snooped by the dcache, but
332 * emulated DMA either goes through the dcache as
333 * normal writes, or the host kernel has handled dcache
334 * coherence. */
335 break;
336
cea5d8c9 337 case OP_31_XOP_LWBRX:
bbf45ba5
HB
338 rt = get_rt(inst);
339 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
340 break;
341
cea5d8c9 342 case OP_31_XOP_TLBSYNC:
bbf45ba5
HB
343 break;
344
cea5d8c9 345 case OP_31_XOP_STWBRX:
bbf45ba5
HB
346 rs = get_rs(inst);
347 ra = get_ra(inst);
348 rb = get_rb(inst);
349
350 emulated = kvmppc_handle_store(run, vcpu,
351 vcpu->arch.gpr[rs],
352 4, 0);
353 break;
354
cea5d8c9 355 case OP_31_XOP_LHBRX:
bbf45ba5
HB
356 rt = get_rt(inst);
357 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
358 break;
359
cea5d8c9 360 case OP_31_XOP_STHBRX:
bbf45ba5
HB
361 rs = get_rs(inst);
362 ra = get_ra(inst);
363 rb = get_rb(inst);
364
365 emulated = kvmppc_handle_store(run, vcpu,
366 vcpu->arch.gpr[rs],
367 2, 0);
368 break;
369
bbf45ba5 370 default:
75f74f0d 371 /* Attempt core-specific emulation below. */
bbf45ba5 372 emulated = EMULATE_FAIL;
bbf45ba5
HB
373 }
374 break;
375
cea5d8c9 376 case OP_LWZ:
bbf45ba5
HB
377 rt = get_rt(inst);
378 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
379 break;
380
cea5d8c9 381 case OP_LWZU:
bbf45ba5
HB
382 ra = get_ra(inst);
383 rt = get_rt(inst);
384 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
385 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
386 break;
387
cea5d8c9 388 case OP_LBZ:
bbf45ba5
HB
389 rt = get_rt(inst);
390 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
391 break;
392
cea5d8c9 393 case OP_LBZU:
bbf45ba5
HB
394 ra = get_ra(inst);
395 rt = get_rt(inst);
396 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
397 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
398 break;
399
cea5d8c9 400 case OP_STW:
bbf45ba5
HB
401 rs = get_rs(inst);
402 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
403 4, 1);
404 break;
405
cea5d8c9 406 case OP_STWU:
bbf45ba5
HB
407 ra = get_ra(inst);
408 rs = get_rs(inst);
409 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
410 4, 1);
411 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
412 break;
413
cea5d8c9 414 case OP_STB:
bbf45ba5
HB
415 rs = get_rs(inst);
416 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
417 1, 1);
418 break;
419
cea5d8c9 420 case OP_STBU:
bbf45ba5
HB
421 ra = get_ra(inst);
422 rs = get_rs(inst);
423 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
424 1, 1);
425 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
426 break;
427
cea5d8c9 428 case OP_LHZ:
bbf45ba5
HB
429 rt = get_rt(inst);
430 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
431 break;
432
cea5d8c9 433 case OP_LHZU:
bbf45ba5
HB
434 ra = get_ra(inst);
435 rt = get_rt(inst);
436 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
437 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
438 break;
439
cea5d8c9 440 case OP_STH:
bbf45ba5
HB
441 rs = get_rs(inst);
442 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
443 2, 1);
444 break;
445
cea5d8c9 446 case OP_STHU:
bbf45ba5
HB
447 ra = get_ra(inst);
448 rs = get_rs(inst);
449 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
450 2, 1);
451 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
452 break;
453
454 default:
bbf45ba5 455 emulated = EMULATE_FAIL;
75f74f0d
HB
456 }
457
458 if (emulated == EMULATE_FAIL) {
459 emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
460 if (emulated == EMULATE_FAIL) {
461 advance = 0;
462 printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
463 "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
464 }
bbf45ba5
HB
465 }
466
46f43c6e 467 trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated);
3b4bd796 468
bbf45ba5
HB
469 if (advance)
470 vcpu->arch.pc += 4; /* Advance past emulated instruction. */
471
472 return emulated;
473}