#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/marker.h>
#include <linux/msi.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_KVMCLOCK_UPDATE    8
#define KVM_REQ_KICK               9

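/*
 * Illustrative sketch, not part of this header: a requester raises a bit
 * with set_bit() on vcpu->requests and the arch vcpu run loop consumes it
 * with test_and_clear_bit(); the flush helper named below is a
 * hypothetical stand-in for the arch-specific handler.
 *
 *	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
 *	...
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		arch_flush_guest_tlb(vcpu);
 */
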
#define KVM_USERSPACE_IRQ_SOURCE_ID	0

struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice. At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
	int dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					  gpa_t addr, int len, int is_write);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);

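/*
 * Minimal sketch of the linear scan described above, assuming the
 * in_range() callback of this era's kvm_iodevice API; the real
 * implementation lives in virt/kvm/kvm_main.c.
 *
 *	struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
 *						  gpa_t addr, int len, int is_write)
 *	{
 *		int i;
 *
 *		for (i = 0; i < bus->dev_count; i++) {
 *			struct kvm_io_device *pos = bus->devs[i];
 *
 *			if (pos->in_range(pos, addr, len, is_write))
 *				return pos;
 *		}
 *		return NULL;
 *	}
 */
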
struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int vcpu_id;
	struct mutex mutex;
	int cpu;
	struct kvm_run *run;
	unsigned long requests;
	unsigned long guest_debug;
	int fpu_active;
	int guest_fpu_loaded;
	wait_queue_head_t wq;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_size;
	unsigned char mmio_data[8];
	gpa_t mmio_phys_addr;
#endif

	struct kvm_vcpu_arch arch;
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	struct {
		unsigned long rmap_pde;
		int write_count;
	} *lpage_info;
	unsigned long userspace_addr;
	int user_alloc;
};

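/*
 * Illustrative relationship, assuming a user-allocated slot: a gfn in
 * [base_gfn, base_gfn + npages) maps to a host virtual address the way
 * gfn_to_hva() computes it (alias handling and validity checks omitted):
 *
 *	hva = slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
 */
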
struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int level);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
	};
	struct list_head link;
};

struct kvm {
	spinlock_t mmu_lock;
	spinlock_t requests_lock;
	struct rw_semaphore slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
	struct kvm_vcpu *bsp_vcpu;
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
	} irqfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */
	struct hlist_head mask_notifier_list;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
 } while (0)

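/*
 * Illustrative use from arch code (the MSR variable is made up):
 *
 *	pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr_index);
 */
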
#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

extern struct page *bad_page;
extern pfn_t bad_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc);
void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

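/*
 * Illustrative sketch of the get/use/release discipline these helpers
 * imply (mapping and error paths trimmed); kvm_release_pfn_dirty() both
 * marks the pfn dirty and drops the reference:
 *
 *	pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_error_pfn(pfn)) {
 *		kvm_release_pfn_clean(pfn);
 *		return -EFAULT;
 *	}
 *	...write through the mapping...
 *	kvm_release_pfn_dirty(pfn);
 */
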
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
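
/*
 * Illustrative use: copy a guest-physical structure into the kernel.
 * These helpers return 0 on success and a negative errno on a bad guest
 * address; the struct name here is made up.
 *
 *	struct my_shared_state s;
 *
 *	if (kvm_read_guest(kvm, gpa, &s, sizeof(s)))
 *		return -EFAULT;
 */
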
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

#define KVM_ASSIGNED_MSIX_PENDING	0x1
struct kvm_guest_msix_entry {
	u32 vector;
	u16 entry;
	u16 flags;
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct work_struct interrupt_work;
	struct list_head list;
	int assigned_dev_id;
	int host_busnr;
	int host_devfn;
	unsigned int entries_nr;
	int host_irq;
	bool host_irq_disabled;
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct kvm_guest_msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t assigned_dev_lock;
};

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
			unsigned long npages);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      gfn_t base_gfn,
				      unsigned long npages)
{
	return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
				    struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
				      struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}

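/*
 * Illustrative call pattern from an arch vcpu run loop (surrounding
 * steps paraphrased): bracket the hardware guest entry so vtime is
 * accounted to the guest and PF_VCPU is set only while it runs.
 *
 *	kvm_guest_enter();
 *	...execute the guest (e.g. VMRUN/VMLAUNCH on x86)...
 *	kvm_guest_exit();
 */
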
static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
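
/*
 * Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12): gfn 0x1234
 * yields gpa 0x1234 << 12 == 0x1234000; pfn_to_hpa() performs the same
 * shift on the host side.
 */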

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 5, d1, d2, d3, d4, d5)
#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 4, d1, d2, d3, d4, 0)
#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 3, d1, d2, d3, 0, 0)
#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 2, d1, d2, 0, 0, 0)
#define KVMTRACE_1D(evt, vcpu, d1, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 1, d1, 0, 0, 0, 0)
#define KVMTRACE_0D(evt, vcpu, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 0, 0, 0, 0, 0, 0)

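/*
 * Illustrative use (the payload variables are made up): record a
 * two-word trace event for this vcpu under the kvm_trace_vmexit marker,
 * expanding KVM_TRC_VMEXIT from the evt argument.
 *
 *	KVMTRACE_2D(VMEXIT, vcpu, exit_reason, error_code, vmexit);
 */
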
#ifdef CONFIG_KVM_TRACE
int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
void kvm_trace_cleanup(void);
#else
static inline
int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
#define kvm_trace_cleanup() ((void)0)
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/*
	 * Both reads happen under the mmu_lock and both values are
	 * modified under mmu_lock, so there's no need for smp_rmb()
	 * here in between; otherwise mmu_notifier_count would have to be
	 * read before mmu_notifier_seq, see the
	 * mmu_notifier_invalidate_range_end write side.
	 */
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif

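/*
 * Illustrative caller pattern (mirroring this era's x86 page fault
 * path): sample the sequence count before resolving the pfn, then
 * recheck under mmu_lock before touching shadow page tables.
 *
 *	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(vcpu->kvm, gfn);
 *	spin_lock(&vcpu->kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;
 */
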
#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_irqfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
void kvm_irqfd_release(struct kvm *kvm);

#else

static inline void kvm_irqfd_init(struct kvm *kvm) {}
static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu == vcpu;
}
#endif