#ifndef __KVM_H
#define __KVM_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include "types.h"

#include "x86.h"

#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 8
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4

#define KVM_PIO_PAGE_OFFSET 1

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH 0

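/*
 * Illustrative sketch, not part of the original header: a request bit such
 * as KVM_REQ_TLB_FLUSH is raised with set_bit() on vcpu->requests and then
 * consumed with test_and_clear_bit() before the next guest entry.  The
 * flush_guest_tlb() name below is a hypothetical stand-in for the
 * arch-specific flush hook:
 *
 *	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
 *	...
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		flush_guest_tlb(vcpu);
 */
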
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

struct kvm_guest_debug {
	int enabled;
	unsigned long bp[4];
	int singlestep;
};

struct kvm_vcpu_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 host_state_reload;
	u32 efer_reload;
	u32 fpu_reload;
	u32 insn_emulation;
	u32 insn_emulation_fail;
};

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice. At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
	int dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);

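/*
 * Illustrative sketch, not part of the original header: the linear search
 * described above.  It assumes each kvm_io_device can be asked whether it
 * claims a given address; the in_range() callback name is an assumption
 * here, not something this header defines:
 *
 *	struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
 *						  gpa_t addr)
 *	{
 *		int i;
 *
 *		for (i = 0; i < bus->dev_count; i++) {
 *			struct kvm_io_device *pos = bus->devs[i];
 *
 *			if (pos->in_range(pos, addr))
 *				return pos;
 *		}
 *		return NULL;
 *	}
 */
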
#ifdef CONFIG_HAS_IOMEM
#define KVM_VCPU_MMIO			\
	int mmio_needed;		\
	int mmio_read_completed;	\
	int mmio_is_write;		\
	int mmio_size;			\
	unsigned char mmio_data[8];	\
	gpa_t mmio_phys_addr;

#else
#define KVM_VCPU_MMIO

#endif

#define KVM_VCPU_COMM					\
	struct kvm *kvm;				\
	struct preempt_notifier preempt_notifier;	\
	int vcpu_id;					\
	struct mutex mutex;				\
	int cpu;					\
	struct kvm_run *run;				\
	int guest_mode;					\
	unsigned long requests;				\
	struct kvm_guest_debug guest_debug;		\
	int fpu_active;					\
	int guest_fpu_loaded;				\
	wait_queue_head_t wq;				\
	int sigset_active;				\
	sigset_t sigset;				\
	struct kvm_vcpu_stat stat;			\
	KVM_VCPU_MMIO

struct kvm_vcpu {
	KVM_VCPU_COMM;

	struct kvm_vcpu_arch arch;
};

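/*
 * Illustrative note, not in the original header: KVM_VCPU_COMM gathers the
 * architecture-independent vcpu fields, while everything architecture
 * specific sits behind vcpu->arch (struct kvm_vcpu_arch, from "x86.h").
 * Generic code is expected to touch only the common members:
 *
 *	mutex_lock(&vcpu->mutex);
 *	... update vcpu->arch state here (x86-specific) ...
 *	mutex_unlock(&vcpu->mutex);
 */
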
struct kvm_mem_alias {
	gfn_t base_gfn;
	unsigned long npages;
	gfn_t target_gfn;
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	unsigned long userspace_addr;
	int user_alloc;
};

struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 mmu_pte_write;
	u32 mmu_pte_updated;
	u32 mmu_pde_zapped;
	u32 mmu_flooded;
	u32 mmu_recycled;
	u32 remote_tlb_flush;
};

struct kvm {
	struct mutex lock; /* protects everything except vcpus */
	struct mm_struct *mm; /* userspace tied to this vm */
	int naliases;
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct list_head active_mmu_pages;
	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_alloc_mmu_pages;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	struct list_head vm_list;
	struct file *filp;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	int round_robin_prev_vcpu;
	unsigned int tss_addr;
	struct page *apic_access_page;
	struct kvm_vm_stat stat;
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
 } while (0)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

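/*
 * Illustrative usage, not in the original header: pr_unimpl() reports guest
 * actions that are not handled; the message is rate-limited and tagged with
 * the task and vcpu id.  An MSR write is a typical caller:
 *
 *	pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
 */
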
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

void decache_vcpus_on_cpu(int cpu);

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module);
void kvm_exit(void);

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

extern struct page *bad_page;

int is_error_page(struct page *page);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

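/*
 * Illustrative sketch, not part of the original header: reading and writing
 * guest memory through the helpers above.  Both return 0 on success and a
 * nonzero value on failure; gpa and val are just sample names:
 *
 *	u32 val;
 *
 *	if (kvm_read_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;
 *	val |= 1;
 *	if (kvm_write_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;
 */
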
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);
void kvm_arch_destroy_vm(struct kvm *kvm);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);
void kvm_arch_destroy_vm(struct kvm *kvm);

int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *v);

static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}

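/*
 * Illustrative sketch, not in the original header: kvm_guest_enter() and
 * kvm_guest_exit() bracket the stretch of time actually spent in guest
 * mode, so CPU time accounting sees it as guest time (PF_VCPU).  In a
 * simplified vcpu run loop (run_guest() is a hypothetical stand-in for the
 * arch-specific entry path):
 *
 *	local_irq_disable();
 *	kvm_guest_enter();
 *	run_guest(vcpu);
 *	kvm_guest_exit();
 *	local_irq_enable();
 */
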
static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

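/*
 * Illustrative note, not in the original header: gfn_to_gpa() is a plain
 * shift by the page size, so with 4K pages (PAGE_SHIFT == 12) guest frame
 * number 0x100 becomes guest physical address 0x100000.
 */
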
enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];

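/*
 * Illustrative sketch, not part of the original header: debugfs_entries[]
 * is provided by the architecture code as a NULL-terminated table mapping a
 * debugfs file name to an offset into struct kvm or struct kvm_vcpu.  The
 * exact entries below are examples only:
 *
 *	struct kvm_stats_debugfs_item debugfs_entries[] = {
 *		{ "pf_fixed", offsetof(struct kvm_vcpu, stat.pf_fixed),
 *		  KVM_STAT_VCPU },
 *		{ "remote_tlb_flush", offsetof(struct kvm, stat.remote_tlb_flush),
 *		  KVM_STAT_VM },
 *		{ NULL }
 *	};
 */
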
#endif