1 #ifndef _ASM_X86_PARAVIRT_H
2 #define _ASM_X86_PARAVIRT_H
3 /* Various instructions on x86 need to be replaced for
4 * para-virtualization: those hooks are defined here. */
7 #include <asm/pgtable_types.h>
10 /* Bitmask of what can be clobbered: usually at least eax. */
12 #define CLBR_EAX (1 << 0)
13 #define CLBR_ECX (1 << 1)
14 #define CLBR_EDX (1 << 2)
15 #define CLBR_EDI (1 << 3)
17 #ifdef CONFIG_X86_32
18 /* CLBR_ANY should match all the registers the platform has; for i386, that is just the four above. */
19 #define CLBR_ANY ((1 << 4) - 1)
21 #define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
22 #define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
23 #define CLBR_SCRATCH (0)
24 #else
25 #define CLBR_RAX CLBR_EAX
26 #define CLBR_RCX CLBR_ECX
27 #define CLBR_RDX CLBR_EDX
28 #define CLBR_RDI CLBR_EDI
29 #define CLBR_RSI (1 << 4)
30 #define CLBR_R8 (1 << 5)
31 #define CLBR_R9 (1 << 6)
32 #define CLBR_R10 (1 << 7)
33 #define CLBR_R11 (1 << 8)
35 #define CLBR_ANY ((1 << 9) - 1)
37 #define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
38 CLBR_RCX | CLBR_R8 | CLBR_R9)
39 #define CLBR_RET_REG (CLBR_RAX)
40 #define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
42 #endif /* X86_64 */
45 #define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
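/*
 * For example, on 64-bit the definition above works out to all of the
 * argument and scratch registers, minus the return register:
 *
 *	CLBR_CALLEE_SAVE == (CLBR_RDI | CLBR_RSI | CLBR_RDX | CLBR_RCX |
 *			     CLBR_R8 | CLBR_R9 | CLBR_R10 | CLBR_R11)
 */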
48 #include <linux/types.h>
49 #include <linux/cpumask.h>
50 #include <asm/kmap_types.h>
51 #include <asm/desc_defs.h>
62 * Wrapper type for pointers to code which uses the non-standard
63 * calling convention. See PV_CALLEE_SAVE_REGS_THUNK below.
65 struct paravirt_callee_save {
66 void *func;
67 };
71 unsigned int kernel_rpl;
72 int shared_kernel_pmd;
79 * Patch may replace one of the defined code sequences with
80 * arbitrary code, subject to the same register constraints.
81 * This generally means the code is not free to clobber any
82 * registers other than EAX. The patch function should return
83 * the number of bytes of code generated, as we nop pad the
84 * rest in generic code.
86 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
87 unsigned long addr, unsigned len);
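/*
 * A minimal sketch of a backend patch hook ("example_patch" is a
 * hypothetical name, not an existing backend): simply defer to the
 * generic patcher, which turns the site into a direct call/jmp where
 * possible and otherwise leaves the indirect call alone.
 *
 *	static unsigned example_patch(u8 type, u16 clobber, void *insnbuf,
 *				      unsigned long addr, unsigned len)
 *	{
 *		return paravirt_patch_default(type, clobber, insnbuf,
 *					      addr, len);
 *	}
 */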
89 /* Basic arch-specific setup */
90 void (*arch_setup)(void);
91 char *(*memory_setup)(void);
92 void (*post_allocator_init)(void);
94 /* Print a banner to identify the environment */
100 /* Set deferred update mode, used for batching operations. */
106 void (*time_init)(void);
108 /* Get and set time of day */
109 unsigned long (*get_wallclock)(void);
110 int (*set_wallclock)(unsigned long);
112 unsigned long long (*sched_clock)(void);
113 unsigned long (*get_tsc_khz)(void);
117 /* hooks for various privileged instructions */
118 unsigned long (*get_debugreg)(int regno);
119 void (*set_debugreg)(int regno, unsigned long value);
123 unsigned long (*read_cr0)(void);
124 void (*write_cr0)(unsigned long);
126 unsigned long (*read_cr4_safe)(void);
127 unsigned long (*read_cr4)(void);
128 void (*write_cr4)(unsigned long);
131 unsigned long (*read_cr8)(void);
132 void (*write_cr8)(unsigned long);
135 /* Segment descriptor handling */
136 void (*load_tr_desc)(void);
137 void (*load_gdt)(const struct desc_ptr *);
138 void (*load_idt)(const struct desc_ptr *);
139 void (*store_gdt)(struct desc_ptr *);
140 void (*store_idt)(struct desc_ptr *);
141 void (*set_ldt)(const void *desc, unsigned entries);
142 unsigned long (*store_tr)(void);
143 void (*load_tls)(struct thread_struct *t, unsigned int cpu);
145 void (*load_gs_index)(unsigned int idx);
147 void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
149 void (*write_gdt_entry)(struct desc_struct *,
150 int entrynum, const void *desc, int size);
151 void (*write_idt_entry)(gate_desc *,
152 int entrynum, const gate_desc *gate);
153 void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
154 void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
156 void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
158 void (*set_iopl_mask)(unsigned mask);
160 void (*wbinvd)(void);
161 void (*io_delay)(void);
163 /* cpuid emulation, mostly so that caps bits can be disabled */
164 void (*cpuid)(unsigned int *eax, unsigned int *ebx,
165 unsigned int *ecx, unsigned int *edx);
167 /* MSR, PMC and TSC operations.
168 err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
169 u64 (*read_msr_amd)(unsigned int msr, int *err);
170 u64 (*read_msr)(unsigned int msr, int *err);
171 int (*rdmsr_regs)(u32 *regs);
172 int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
173 int (*wrmsr_regs)(u32 *regs);
175 u64 (*read_tsc)(void);
176 u64 (*read_pmc)(int counter);
177 unsigned long long (*read_tscp)(unsigned int *aux);
180 * Atomically enable interrupts and return to userspace. This
181 * is only ever used to return to 32-bit processes; in a
182 * 64-bit kernel, it's used for 32-on-64 compat processes, but
183 * never native 64-bit processes. (Jump, not call.)
185 void (*irq_enable_sysexit)(void);
188 * Switch to usermode gs and return to 64-bit usermode using
189 * sysret. Only used in 64-bit kernels to return to 64-bit
190 * processes. Usermode register state, including %rsp, must
191 * already be restored.
193 void (*usergs_sysret64)(void);
196 * Switch to usermode gs and return to 32-bit usermode using
197 * sysret. Used to return to 32-on-64 compat processes.
198 * Other usermode register state, including %esp, must already
199 * be restored.
200 */
201 void (*usergs_sysret32)(void);
203 /* Normal iret. Jump to this with the standard iret stack
204 frame set up. */
205 void (*iret)(void);
207 void (*swapgs)(void);
209 void (*start_context_switch)(struct task_struct *prev);
210 void (*end_context_switch)(struct task_struct *next);
214 void (*init_IRQ)(void);
217 * Get/set interrupt state. save_fl and restore_fl are only
218 * expected to use X86_EFLAGS_IF; all other bits
219 * returned from save_fl are undefined, and may be ignored by
220 * restore_fl.
221 *
222 * NOTE: Callers of these functions expect the callee to preserve
223 * more registers than the standard C calling convention requires.
225 struct paravirt_callee_save save_fl;
226 struct paravirt_callee_save restore_fl;
227 struct paravirt_callee_save irq_disable;
228 struct paravirt_callee_save irq_enable;
230 void (*safe_halt)(void);
234 void (*adjust_exception_frame)(void);
239 #ifdef CONFIG_X86_LOCAL_APIC
240 void (*setup_boot_clock)(void);
241 void (*setup_secondary_clock)(void);
243 void (*startup_ipi_hook)(int phys_apicid,
244 unsigned long start_eip,
245 unsigned long start_esp);
251 * Called before/after init_mm pagetable setup. setup_start
252 * may reset %cr3, and may pre-install parts of the pagetable;
253 * pagetable setup is expected to preserve any existing
254 * mapping.
255 */
256 void (*pagetable_setup_start)(pgd_t *pgd_base);
257 void (*pagetable_setup_done)(pgd_t *pgd_base);
259 unsigned long (*read_cr2)(void);
260 void (*write_cr2)(unsigned long);
262 unsigned long (*read_cr3)(void);
263 void (*write_cr3)(unsigned long);
266 * Hooks for intercepting the creation/use/destruction of an
267 * mm_struct.
268 */
269 void (*activate_mm)(struct mm_struct *prev,
270 struct mm_struct *next);
271 void (*dup_mmap)(struct mm_struct *oldmm,
272 struct mm_struct *mm);
273 void (*exit_mmap)(struct mm_struct *mm);
277 void (*flush_tlb_user)(void);
278 void (*flush_tlb_kernel)(void);
279 void (*flush_tlb_single)(unsigned long addr);
280 void (*flush_tlb_others)(const struct cpumask *cpus,
281 struct mm_struct *mm,
282 unsigned long va);
284 /* Hooks for allocating and freeing a pagetable top-level */
285 int (*pgd_alloc)(struct mm_struct *mm);
286 void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
289 * Hooks for allocating/releasing pagetable pages when they're
290 * attached to a pagetable
292 void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
293 void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
294 void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
295 void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
296 void (*release_pte)(unsigned long pfn);
297 void (*release_pmd)(unsigned long pfn);
298 void (*release_pud)(unsigned long pfn);
300 /* Pagetable manipulation functions */
301 void (*set_pte)(pte_t *ptep, pte_t pteval);
302 void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
303 pte_t *ptep, pte_t pteval);
304 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
305 void (*pte_update)(struct mm_struct *mm, unsigned long addr,
307 void (*pte_update_defer)(struct mm_struct *mm,
308 unsigned long addr, pte_t *ptep);
310 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
312 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
313 pte_t *ptep, pte_t pte);
315 struct paravirt_callee_save pte_val;
316 struct paravirt_callee_save make_pte;
318 struct paravirt_callee_save pgd_val;
319 struct paravirt_callee_save make_pgd;
321 #if PAGETABLE_LEVELS >= 3
322 #ifdef CONFIG_X86_PAE
323 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
324 void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
326 void (*pmd_clear)(pmd_t *pmdp);
328 #endif /* CONFIG_X86_PAE */
330 void (*set_pud)(pud_t *pudp, pud_t pudval);
332 struct paravirt_callee_save pmd_val;
333 struct paravirt_callee_save make_pmd;
335 #if PAGETABLE_LEVELS == 4
336 struct paravirt_callee_save pud_val;
337 struct paravirt_callee_save make_pud;
339 void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
340 #endif /* PAGETABLE_LEVELS == 4 */
341 #endif /* PAGETABLE_LEVELS >= 3 */
343 #ifdef CONFIG_HIGHPTE
344 void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
347 struct pv_lazy_ops lazy_mode;
351 /* Sometimes the physical address is a pfn, and sometimes it's
352 an mfn. We can tell which is which from the index. */
353 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
354 phys_addr_t phys, pgprot_t flags);
359 int (*spin_is_locked)(struct raw_spinlock *lock);
360 int (*spin_is_contended)(struct raw_spinlock *lock);
361 void (*spin_lock)(struct raw_spinlock *lock);
362 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
363 int (*spin_trylock)(struct raw_spinlock *lock);
364 void (*spin_unlock)(struct raw_spinlock *lock);
367 /* This contains all the paravirt structures: we get a convenient
368 * number for each function using the offset which we use to indicate
369 * what to patch. */
370 struct paravirt_patch_template {
371 struct pv_init_ops pv_init_ops;
372 struct pv_time_ops pv_time_ops;
373 struct pv_cpu_ops pv_cpu_ops;
374 struct pv_irq_ops pv_irq_ops;
375 struct pv_apic_ops pv_apic_ops;
376 struct pv_mmu_ops pv_mmu_ops;
377 struct pv_lock_ops pv_lock_ops;
380 extern struct pv_info pv_info;
381 extern struct pv_init_ops pv_init_ops;
382 extern struct pv_time_ops pv_time_ops;
383 extern struct pv_cpu_ops pv_cpu_ops;
384 extern struct pv_irq_ops pv_irq_ops;
385 extern struct pv_apic_ops pv_apic_ops;
386 extern struct pv_mmu_ops pv_mmu_ops;
387 extern struct pv_lock_ops pv_lock_ops;
389 #define PARAVIRT_PATCH(x) \
390 (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
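/*
 * For example, PARAVIRT_PATCH(pv_irq_ops.irq_disable) evaluates to the
 * word index of the irq_disable slot within paravirt_patch_template,
 * so a patch site can always be converted back into the structure
 * offset (and thus the op) it refers to.
 */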
392 #define paravirt_type(op) \
393 [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
394 [paravirt_opptr] "i" (&(op))
395 #define paravirt_clobber(clobber) \
396 [paravirt_clobber] "i" (clobber)
399 * Generate some code, and mark it as patchable by the
400 * apply_paravirt() alternate instruction patcher.
402 #define _paravirt_alt(insn_string, type, clobber) \
403 "771:\n\t" insn_string "\n" "772:\n" \
404 ".pushsection .parainstructions,\"a\"\n" \
407 " .byte " type "\n" \
408 " .byte 772b-771b\n" \
409 " .short " clobber "\n" \
412 /* Generate patchable code, with the default asm parameters. */
413 #define paravirt_alt(insn_string) \
414 _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
416 /* Simple instruction patching code. */
417 #define DEF_NATIVE(ops, name, code) \
418 extern const char start_##ops##_##name[], end_##ops##_##name[]; \
419 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
421 unsigned paravirt_patch_nop(void);
422 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
423 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
424 unsigned paravirt_patch_ignore(unsigned len);
425 unsigned paravirt_patch_call(void *insnbuf,
426 const void *target, u16 tgt_clobbers,
427 unsigned long addr, u16 site_clobbers,
429 unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
430 unsigned long addr, unsigned len);
431 unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
432 unsigned long addr, unsigned len);
434 unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
435 const char *start, const char *end);
437 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
438 unsigned long addr, unsigned len);
440 int paravirt_disable_iospace(void);
443 * This generates an indirect call based on the operation type number.
444 * The type number, computed in PARAVIRT_PATCH, is derived from the
445 * offset into the paravirt_patch_template structure, and can therefore be
446 * freely converted back into a structure offset.
448 #define PARAVIRT_CALL "call *%c[paravirt_opptr];"
451 * These macros are intended to wrap calls through one of the paravirt
452 * ops structs, so that they can be later identified and patched at
455 * Normally, a call to a pv_op function is a simple indirect call:
456 * (pv_op_struct.operations)(args...).
458 * Unfortunately, this is a relatively slow operation for modern CPUs,
459 * because it cannot necessarily determine what the destination
460 * address is. In this case, the address is a runtime constant, so at
461 * the very least we can patch the call to be a simple direct call, or
462 * ideally, patch an inline implementation into the callsite. (Direct
463 * calls are essentially free, because the call and return addresses
464 * are completely predictable.)
466 * For i386, these macros rely on the standard gcc "regparm(3)" calling
467 * convention, in which the first three arguments are placed in %eax,
468 * %edx, %ecx (in that order), and the remaining arguments are placed
469 * on the stack. All caller-save registers (eax,edx,ecx) are expected
470 * to be modified (either clobbered or used for return values).
471 * X86_64, on the other hand, already specifies a register-based calling
472 * convention, returning in %rax, with parameters passed in %rdi, %rsi,
473 * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
474 * special handling for dealing with 4 arguments, unlike i386.
475 * However, x86_64 also has to clobber all caller-saved registers, of
476 * which there are quite a few more (r8 - r11)
478 * The call instruction itself is marked by placing its start address
479 * and size into the .parainstructions section, so that
480 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
481 * appropriate patching under the control of the backend pv_init_ops
482 * implementation.
484 * Unfortunately there's no way to get gcc to generate the args setup
485 * for the call, and then allow the call itself to be generated by an
486 * inline asm. Because of this, we must do the complete arg setup and
487 * return value handling from within these macros. This is fairly
488 * cumbersome.
490 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
491 * It could be extended to more arguments, but there would be little
492 * to be gained from that. For each number of arguments, there are
493 * the two VCALL and CALL variants for void and non-void functions.
495 * When there is a return value, the invoker of the macro must specify
496 * the return type. The macro then uses sizeof() on that type to
497 * determine whether it's a 32- or 64-bit value, and places the return
498 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
499 * 64-bit). For x86_64 machines, it just returns in %rax regardless of
500 * the return value size.
502 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
503 * arguments, in low,high order.
506 * Small structures are passed and returned in registers. The macro
507 * calling convention can't directly deal with them, so the wrapper
508 * functions must do the packing and unpacking.
510 * These PVOP_* macros are only defined within this header. This
511 * means that all uses must be wrapped in inline functions. This also
512 * makes sure the incoming and outgoing types are always correct.
513 */
514 #ifdef CONFIG_X86_32
515 #define PVOP_VCALL_ARGS \
516 unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
517 #define PVOP_CALL_ARGS PVOP_VCALL_ARGS
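/*
 * Note: the self-assignments above ("__eax = __eax", ...) only exist to
 * silence "may be used uninitialized" warnings; the variables are in
 * fact written as asm outputs before they are ever read.
 */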
519 #define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
520 #define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
521 #define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
523 #define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
525 #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
527 #define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
528 #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
530 #define EXTRA_CLOBBERS
531 #define VEXTRA_CLOBBERS
532 #else /* CONFIG_X86_64 */
533 #define PVOP_VCALL_ARGS \
534 unsigned long __edi = __edi, __esi = __esi, \
535 __edx = __edx, __ecx = __ecx
536 #define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
538 #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
539 #define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
540 #define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
541 #define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
543 #define PVOP_VCALL_CLOBBERS "=D" (__edi), \
544 "=S" (__esi), "=d" (__edx), \
546 #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
548 #define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
549 #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
551 #define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
552 #define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
553 #endif /* CONFIG_X86_32 */
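/*
 * Illustration of the intended wrapping ("example_read_cr0" is just an
 * illustrative name; the real wrapper appears later in this header):
 * the inline function pins down the argument and return types, and the
 * PVOP macro emits the patchable indirect call.
 *
 *	static inline unsigned long example_read_cr0(void)
 *	{
 *		return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
 *	}
 */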
555 #ifdef CONFIG_PARAVIRT_DEBUG
556 #define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
557 #else
558 #define PVOP_TEST_NULL(op) ((void)op)
559 #endif
561 #define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
566 PVOP_TEST_NULL(op); \
567 /* This is 32-bit specific, but is okay in 64-bit */ \
568 /* since this condition will never hold */ \
569 if (sizeof(rettype) > sizeof(unsigned long)) { \
571 paravirt_alt(PARAVIRT_CALL) \
574 : paravirt_type(op), \
575 paravirt_clobber(clbr), \
577 : "memory", "cc" extra_clbr); \
578 __ret = (rettype)((((u64)__edx) << 32) | __eax); \
581 paravirt_alt(PARAVIRT_CALL) \
584 : paravirt_type(op), \
585 paravirt_clobber(clbr), \
587 : "memory", "cc" extra_clbr); \
588 __ret = (rettype)__eax; \
593 #define __PVOP_CALL(rettype, op, pre, post, ...) \
594 ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
595 EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
597 #define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
598 ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
599 PVOP_CALLEE_CLOBBERS, , \
600 pre, post, ##__VA_ARGS__)
603 #define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
606 PVOP_TEST_NULL(op); \
608 paravirt_alt(PARAVIRT_CALL) \
611 : paravirt_type(op), \
612 paravirt_clobber(clbr), \
614 : "memory", "cc" extra_clbr); \
617 #define __PVOP_VCALL(op, pre, post, ...) \
618 ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
620 pre, post, ##__VA_ARGS__)
622 #define __PVOP_VCALLEESAVE(op, pre, post, ...) \
623 ____PVOP_VCALL(op.func, CLBR_RET_REG, \
624 PVOP_VCALLEE_CLOBBERS, , \
625 pre, post, ##__VA_ARGS__)
629 #define PVOP_CALL0(rettype, op) \
630 __PVOP_CALL(rettype, op, "", "")
631 #define PVOP_VCALL0(op) \
632 __PVOP_VCALL(op, "", "")
634 #define PVOP_CALLEE0(rettype, op) \
635 __PVOP_CALLEESAVE(rettype, op, "", "")
636 #define PVOP_VCALLEE0(op) \
637 __PVOP_VCALLEESAVE(op, "", "")
640 #define PVOP_CALL1(rettype, op, arg1) \
641 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
642 #define PVOP_VCALL1(op, arg1) \
643 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
645 #define PVOP_CALLEE1(rettype, op, arg1) \
646 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
647 #define PVOP_VCALLEE1(op, arg1) \
648 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
651 #define PVOP_CALL2(rettype, op, arg1, arg2) \
652 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
653 PVOP_CALL_ARG2(arg2))
654 #define PVOP_VCALL2(op, arg1, arg2) \
655 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
656 PVOP_CALL_ARG2(arg2))
658 #define PVOP_CALLEE2(rettype, op, arg1, arg2) \
659 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
660 PVOP_CALL_ARG2(arg2))
661 #define PVOP_VCALLEE2(op, arg1, arg2) \
662 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
663 PVOP_CALL_ARG2(arg2))
666 #define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
667 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
668 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
669 #define PVOP_VCALL3(op, arg1, arg2, arg3) \
670 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
671 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
673 /* The 4-argument case is the only real difference; x86_64 can make it much simpler */
674 #ifdef CONFIG_X86_32
675 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
676 __PVOP_CALL(rettype, op, \
677 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
678 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
679 PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
680 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
682 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
683 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
684 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
686 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
687 __PVOP_CALL(rettype, op, "", "", \
688 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
689 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
690 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
691 __PVOP_VCALL(op, "", "", \
692 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
693 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
694 #endif /* CONFIG_X86_32 */
696 static inline int paravirt_enabled(void)
698 return pv_info.paravirt_enabled;
701 static inline void load_sp0(struct tss_struct *tss,
702 struct thread_struct *thread)
704 PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
707 #define ARCH_SETUP pv_init_ops.arch_setup();
708 static inline unsigned long get_wallclock(void)
710 return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
713 static inline int set_wallclock(unsigned long nowtime)
715 return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
718 static inline void (*choose_time_init(void))(void)
720 return pv_time_ops.time_init;
723 /* The paravirtualized CPUID instruction. */
724 static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
725 unsigned int *ecx, unsigned int *edx)
727 PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
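/*
 * Usage sketch: all four pointers are inputs as well as outputs, so the
 * caller seeds the leaf number first (leaf 1 here is illustrative):
 *
 *	unsigned int eax = 1, ebx = 0, ecx = 0, edx = 0;
 *	__cpuid(&eax, &ebx, &ecx, &edx);
 *	(the feature words are now in ecx/edx)
 */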
731 * These special macros can be used to get or set a debugging register
733 static inline unsigned long paravirt_get_debugreg(int reg)
735 return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
737 #define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
738 static inline void set_debugreg(unsigned long val, int reg)
740 PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
743 static inline void clts(void)
745 PVOP_VCALL0(pv_cpu_ops.clts);
748 static inline unsigned long read_cr0(void)
750 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
753 static inline void write_cr0(unsigned long x)
755 PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
758 static inline unsigned long read_cr2(void)
760 return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
763 static inline void write_cr2(unsigned long x)
765 PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
768 static inline unsigned long read_cr3(void)
770 return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
773 static inline void write_cr3(unsigned long x)
775 PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
778 static inline unsigned long read_cr4(void)
780 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
782 static inline unsigned long read_cr4_safe(void)
784 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
787 static inline void write_cr4(unsigned long x)
789 PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
793 static inline unsigned long read_cr8(void)
795 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
798 static inline void write_cr8(unsigned long x)
800 PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
804 static inline void raw_safe_halt(void)
806 PVOP_VCALL0(pv_irq_ops.safe_halt);
809 static inline void halt(void)
811 PVOP_VCALL0(pv_irq_ops.safe_halt);
814 static inline void wbinvd(void)
816 PVOP_VCALL0(pv_cpu_ops.wbinvd);
819 #define get_kernel_rpl() (pv_info.kernel_rpl)
821 static inline u64 paravirt_read_msr(unsigned msr, int *err)
823 return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
826 static inline int paravirt_rdmsr_regs(u32 *regs)
828 return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
831 static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
833 return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
835 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
837 return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
840 static inline int paravirt_wrmsr_regs(u32 *regs)
842 return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
845 /* These should all do BUG_ON(_err), but our headers are too tangled. */
846 #define rdmsr(msr, val1, val2) \
849 u64 _l = paravirt_read_msr(msr, &_err); \
854 #define wrmsr(msr, val1, val2) \
856 paravirt_write_msr(msr, val1, val2); \
859 #define rdmsrl(msr, val) \
862 val = paravirt_read_msr(msr, &_err); \
865 #define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
866 #define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b)
868 /* rdmsr with exception handling */
869 #define rdmsr_safe(msr, a, b) \
872 u64 _l = paravirt_read_msr(msr, &_err); \
878 #define rdmsr_safe_regs(regs) paravirt_rdmsr_regs(regs)
879 #define wrmsr_safe_regs(regs) paravirt_wrmsr_regs(regs)
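/*
 * Usage sketch (0x1a0 is only an illustrative MSR number): the _safe
 * variants hand a faulting access back as an error code instead of
 * oopsing, so probing for an MSR looks like:
 *
 *	u32 lo, hi;
 *	int err = rdmsr_safe(0x1a0, &lo, &hi);
 *	if (!err)
 *		printk("msr: %08x%08x\n", hi, lo);
 */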
881 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
885 *p = paravirt_read_msr(msr, &err);
888 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
892 *p = paravirt_read_msr_amd(msr, &err);
896 static inline u64 paravirt_read_tsc(void)
898 return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
901 #define rdtscl(low) \
903 u64 _l = paravirt_read_tsc(); \
907 #define rdtscll(val) (val = paravirt_read_tsc())
909 static inline unsigned long long paravirt_sched_clock(void)
911 return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
913 #define calibrate_tsc() (pv_time_ops.get_tsc_khz())
915 static inline unsigned long long paravirt_read_pmc(int counter)
917 return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
920 #define rdpmc(counter, low, high) \
922 u64 _l = paravirt_read_pmc(counter); \
927 static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
929 return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
932 #define rdtscp(low, high, aux) \
935 unsigned long __val = paravirt_rdtscp(&__aux); \
936 (low) = (u32)__val; \
937 (high) = (u32)(__val >> 32); \
941 #define rdtscpll(val, aux) \
943 unsigned long __aux; \
944 val = paravirt_rdtscp(&__aux); \
948 static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
950 PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
953 static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
955 PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
958 static inline void load_TR_desc(void)
960 PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
962 static inline void load_gdt(const struct desc_ptr *dtr)
964 PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
966 static inline void load_idt(const struct desc_ptr *dtr)
968 PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
970 static inline void set_ldt(const void *addr, unsigned entries)
972 PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
974 static inline void store_gdt(struct desc_ptr *dtr)
976 PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
978 static inline void store_idt(struct desc_ptr *dtr)
980 PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
982 static inline unsigned long paravirt_store_tr(void)
984 return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
986 #define store_tr(tr) ((tr) = paravirt_store_tr())
987 static inline void load_TLS(struct thread_struct *t, unsigned cpu)
989 PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
993 static inline void load_gs_index(unsigned int gs)
995 PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
999 static inline void write_ldt_entry(struct desc_struct *dt, int entry,
1002 PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
1005 static inline void write_gdt_entry(struct desc_struct *dt, int entry,
1006 void *desc, int type)
1008 PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
1011 static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
1013 PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
1015 static inline void set_iopl_mask(unsigned mask)
1017 PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
1020 /* The paravirtualized I/O functions */
1021 static inline void slow_down_io(void)
1023 pv_cpu_ops.io_delay();
1024 #ifdef REALLY_SLOW_IO
1025 pv_cpu_ops.io_delay();
1026 pv_cpu_ops.io_delay();
1027 pv_cpu_ops.io_delay();
1028 #endif
1031 #ifdef CONFIG_X86_LOCAL_APIC
1032 static inline void setup_boot_clock(void)
1034 PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
1037 static inline void setup_secondary_clock(void)
1039 PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
1043 static inline void paravirt_post_allocator_init(void)
1045 if (pv_init_ops.post_allocator_init)
1046 (*pv_init_ops.post_allocator_init)();
1049 static inline void paravirt_pagetable_setup_start(pgd_t *base)
1051 (*pv_mmu_ops.pagetable_setup_start)(base);
1054 static inline void paravirt_pagetable_setup_done(pgd_t *base)
1056 (*pv_mmu_ops.pagetable_setup_done)(base);
1060 static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
1061 unsigned long start_esp)
1063 PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
1064 phys_apicid, start_eip, start_esp);
1068 static inline void paravirt_activate_mm(struct mm_struct *prev,
1069 struct mm_struct *next)
1071 PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
1074 static inline void arch_dup_mmap(struct mm_struct *oldmm,
1075 struct mm_struct *mm)
1077 PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
1080 static inline void arch_exit_mmap(struct mm_struct *mm)
1082 PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
1085 static inline void __flush_tlb(void)
1087 PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
1089 static inline void __flush_tlb_global(void)
1091 PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
1093 static inline void __flush_tlb_single(unsigned long addr)
1095 PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
1098 static inline void flush_tlb_others(const struct cpumask *cpumask,
1099 struct mm_struct *mm,
1100 unsigned long va)
1102 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
1105 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
1107 return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
1110 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1112 PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
1115 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1117 PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
1119 static inline void paravirt_release_pte(unsigned long pfn)
1121 PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
1124 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1126 PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
1129 static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
1130 unsigned long start, unsigned long count)
1132 PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
1134 static inline void paravirt_release_pmd(unsigned long pfn)
1136 PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
1139 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1141 PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
1143 static inline void paravirt_release_pud(unsigned long pfn)
1145 PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
1148 #ifdef CONFIG_HIGHPTE
1149 static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
1152 ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
1157 static inline void pte_update(struct mm_struct *mm, unsigned long addr,
1160 PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
1163 static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
1166 PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
1169 static inline pte_t __pte(pteval_t val)
1173 if (sizeof(pteval_t) > sizeof(long))
1174 ret = PVOP_CALLEE2(pteval_t,
1175 pv_mmu_ops.make_pte,
1176 val, (u64)val >> 32);
1178 ret = PVOP_CALLEE1(pteval_t,
1179 pv_mmu_ops.make_pte,
1182 return (pte_t) { .pte = ret };
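/*
 * The sizeof() comparison above is resolved at compile time: with PAE a
 * pteval_t is 64 bits while long is 32, so the value is split across
 * two 32-bit argument registers; on !PAE and on x86_64 the one-argument
 * form is chosen instead.
 */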
1185 static inline pteval_t pte_val(pte_t pte)
1189 if (sizeof(pteval_t) > sizeof(long))
1190 ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
1191 pte.pte, (u64)pte.pte >> 32);
1193 ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
1199 static inline pgd_t __pgd(pgdval_t val)
1203 if (sizeof(pgdval_t) > sizeof(long))
1204 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
1205 val, (u64)val >> 32);
1207 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
1210 return (pgd_t) { ret };
1213 static inline pgdval_t pgd_val(pgd_t pgd)
1217 if (sizeof(pgdval_t) > sizeof(long))
1218 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
1219 pgd.pgd, (u64)pgd.pgd >> 32);
1221 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
1227 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1228 static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
1233 ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
1236 return (pte_t) { .pte = ret };
1239 static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
1240 pte_t *ptep, pte_t pte)
1242 if (sizeof(pteval_t) > sizeof(long))
1244 pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
1246 PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
1247 mm, addr, ptep, pte.pte);
1250 static inline void set_pte(pte_t *ptep, pte_t pte)
1252 if (sizeof(pteval_t) > sizeof(long))
1253 PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
1254 pte.pte, (u64)pte.pte >> 32);
1256 PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
1260 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1261 pte_t *ptep, pte_t pte)
1263 if (sizeof(pteval_t) > sizeof(long))
1265 pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
1267 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
1270 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
1272 pmdval_t val = native_pmd_val(pmd);
1274 if (sizeof(pmdval_t) > sizeof(long))
1275 PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
1277 PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
1280 #if PAGETABLE_LEVELS >= 3
1281 static inline pmd_t __pmd(pmdval_t val)
1285 if (sizeof(pmdval_t) > sizeof(long))
1286 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
1287 val, (u64)val >> 32);
1289 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
1292 return (pmd_t) { ret };
1295 static inline pmdval_t pmd_val(pmd_t pmd)
1299 if (sizeof(pmdval_t) > sizeof(long))
1300 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
1301 pmd.pmd, (u64)pmd.pmd >> 32);
1303 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
1309 static inline void set_pud(pud_t *pudp, pud_t pud)
1311 pudval_t val = native_pud_val(pud);
1313 if (sizeof(pudval_t) > sizeof(long))
1314 PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
1315 val, (u64)val >> 32);
1317 PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
1320 #if PAGETABLE_LEVELS == 4
1321 static inline pud_t __pud(pudval_t val)
1325 if (sizeof(pudval_t) > sizeof(long))
1326 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
1327 val, (u64)val >> 32);
1329 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
1332 return (pud_t) { ret };
1335 static inline pudval_t pud_val(pud_t pud)
1339 if (sizeof(pudval_t) > sizeof(long))
1340 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
1341 pud.pud, (u64)pud.pud >> 32);
1343 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
1349 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
1351 pgdval_t val = native_pgd_val(pgd);
1353 if (sizeof(pgdval_t) > sizeof(long))
1354 PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
1355 val, (u64)val >> 32);
1357 PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
1361 static inline void pgd_clear(pgd_t *pgdp)
1363 set_pgd(pgdp, __pgd(0));
1366 static inline void pud_clear(pud_t *pudp)
1368 set_pud(pudp, __pud(0));
1371 #endif /* PAGETABLE_LEVELS == 4 */
1373 #endif /* PAGETABLE_LEVELS >= 3 */
1375 #ifdef CONFIG_X86_PAE
1376 /* Special-case pte-setting operations for PAE, where a 64-bit pte
1377 can't be updated atomically with a single write */
1378 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1380 PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
1381 pte.pte, pte.pte >> 32);
1384 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
1387 PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
1390 static inline void pmd_clear(pmd_t *pmdp)
1392 PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
1394 #else /* !CONFIG_X86_PAE */
1395 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1400 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
1403 set_pte_at(mm, addr, ptep, __pte(0));
1406 static inline void pmd_clear(pmd_t *pmdp)
1408 set_pmd(pmdp, __pmd(0));
1410 #endif /* CONFIG_X86_PAE */
1412 /* Lazy mode for batching updates / context switch */
1413 enum paravirt_lazy_mode {
1419 enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
1420 void paravirt_start_context_switch(struct task_struct *prev);
1421 void paravirt_end_context_switch(struct task_struct *next);
1423 void paravirt_enter_lazy_mmu(void);
1424 void paravirt_leave_lazy_mmu(void);
1426 #define __HAVE_ARCH_START_CONTEXT_SWITCH
1427 static inline void arch_start_context_switch(struct task_struct *prev)
1429 PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
1432 static inline void arch_end_context_switch(struct task_struct *next)
1434 PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
1437 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
1438 static inline void arch_enter_lazy_mmu_mode(void)
1440 PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
1443 static inline void arch_leave_lazy_mmu_mode(void)
1445 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
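/*
 * Usage sketch: bracket a run of pagetable updates so a hypervisor
 * backend can queue them and flush the whole batch at once (mm, addr,
 * ptep and pte are assumed to be set up by the caller):
 *
 *	arch_enter_lazy_mmu_mode();
 *	set_pte_at(mm, addr, ptep, pte);	(may be deferred)
 *	arch_leave_lazy_mmu_mode();		(batch is flushed here)
 */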
1448 void arch_flush_lazy_mmu_mode(void);
1450 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1451 phys_addr_t phys, pgprot_t flags)
1453 pv_mmu_ops.set_fixmap(idx, phys, flags);
1456 void _paravirt_nop(void);
1457 u32 _paravirt_ident_32(u32);
1458 u64 _paravirt_ident_64(u64);
1460 #define paravirt_nop ((void *)_paravirt_nop)
1462 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
1464 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
1466 return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
1469 static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
1471 return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
1473 #define __raw_spin_is_contended __raw_spin_is_contended
1475 static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
1477 PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
1480 static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
1481 unsigned long flags)
1483 PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
1486 static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
1488 return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
1491 static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
1493 PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
1498 /* These all sit in the .parainstructions section to tell us what to patch. */
1499 struct paravirt_patch_site {
1500 u8 *instr; /* original instructions */
1501 u8 instrtype; /* type of this instruction */
1502 u8 len; /* length of original instruction */
1503 u16 clobbers; /* what registers you may clobber */
1506 extern struct paravirt_patch_site __parainstructions[],
1507 __parainstructions_end[];
1509 #ifdef CONFIG_X86_32
1510 #define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
1511 #define PV_RESTORE_REGS "popl %edx; popl %ecx;"
1513 /* save and restore all caller-save registers, except return value */
1514 #define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;"
1515 #define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;"
1517 #define PV_FLAGS_ARG "0"
1518 #define PV_EXTRA_CLOBBERS
1519 #define PV_VEXTRA_CLOBBERS
1521 /* save and restore all caller-save registers, except return value */
1522 #define PV_SAVE_ALL_CALLER_REGS \
1531 #define PV_RESTORE_ALL_CALLER_REGS \
1541 /* We save some registers, but saving all of them would be too much:
1542 * we clobber all caller-saved registers except the argument register */
1543 #define PV_SAVE_REGS "pushq %%rdi;"
1544 #define PV_RESTORE_REGS "popq %%rdi;"
1545 #define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx", "rdx", "rsi"
1546 #define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx", "rdx", "rsi"
1547 #define PV_FLAGS_ARG "D"
1551 * Generate a thunk around a function which saves all caller-save
1552 * registers except for the return value. This allows C functions to
1553 * be called from assembler code where fewer than normal registers are
1554 * available. It may also help code generation around calls from C
1555 * code if the common case doesn't use many registers.
1557 * When a callee is wrapped in a thunk, the caller can assume that all
1558 * arg regs and all scratch registers are preserved across the
1559 * call. The return value in rax/eax will not be saved, even for void
1562 #define PV_CALLEE_SAVE_REGS_THUNK(func) \
1563 extern typeof(func) __raw_callee_save_##func; \
1564 static void *__##func##__ __used = func; \
1566 asm(".pushsection .text;" \
1567 "__raw_callee_save_" #func ": " \
1568 PV_SAVE_ALL_CALLER_REGS \
1570 PV_RESTORE_ALL_CALLER_REGS \
1574 /* Get a reference to a callee-save function */
1575 #define PV_CALLEE_SAVE(func) \
1576 ((struct paravirt_callee_save) { __raw_callee_save_##func })
1578 /* Promise that "func" already uses the right calling convention */
1579 #define __PV_IS_CALLEE_SAVE(func) \
1580 ((struct paravirt_callee_save) { func })
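/*
 * Usage sketch ("my_save_fl" is a hypothetical backend function): the
 * thunk provides the stricter register-preservation guarantee around
 * the C implementation, and PV_CALLEE_SAVE yields the wrapped pointer
 * to install:
 *
 *	static unsigned long my_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 */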
1582 static inline unsigned long __raw_local_save_flags(void)
1586 asm volatile(paravirt_alt(PARAVIRT_CALL)
1588 : paravirt_type(pv_irq_ops.save_fl),
1589 paravirt_clobber(CLBR_EAX)
1594 static inline void raw_local_irq_restore(unsigned long f)
1596 asm volatile(paravirt_alt(PARAVIRT_CALL)
1599 paravirt_type(pv_irq_ops.restore_fl),
1600 paravirt_clobber(CLBR_EAX)
1604 static inline void raw_local_irq_disable(void)
1606 asm volatile(paravirt_alt(PARAVIRT_CALL)
1608 : paravirt_type(pv_irq_ops.irq_disable),
1609 paravirt_clobber(CLBR_EAX)
1610 : "memory", "eax", "cc");
1613 static inline void raw_local_irq_enable(void)
1615 asm volatile(paravirt_alt(PARAVIRT_CALL)
1617 : paravirt_type(pv_irq_ops.irq_enable),
1618 paravirt_clobber(CLBR_EAX)
1619 : "memory", "eax", "cc");
1622 static inline unsigned long __raw_local_irq_save(void)
1626 f = __raw_local_save_flags();
1627 raw_local_irq_disable();
1632 /* Make sure as little as possible of this mess escapes. */
1633 #undef PARAVIRT_CALL
1647 #else /* __ASSEMBLY__ */
1649 #define _PVSITE(ptype, clobbers, ops, word, algn) \
1653 .pushsection .parainstructions,"a"; \
1662 #define COND_PUSH(set, mask, reg) \
1663 .if ((~(set)) & mask); push %reg; .endif
1664 #define COND_POP(set, mask, reg) \
1665 .if ((~(set)) & mask); pop %reg; .endif
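/*
 * Note the negation: a register is pushed/popped only when it is NOT in
 * the clobber set, i.e. only the registers the call site expects to
 * survive are actually saved around the call.
 */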
1667 #ifdef CONFIG_X86_64
1669 #define PV_SAVE_REGS(set) \
1670 COND_PUSH(set, CLBR_RAX, rax); \
1671 COND_PUSH(set, CLBR_RCX, rcx); \
1672 COND_PUSH(set, CLBR_RDX, rdx); \
1673 COND_PUSH(set, CLBR_RSI, rsi); \
1674 COND_PUSH(set, CLBR_RDI, rdi); \
1675 COND_PUSH(set, CLBR_R8, r8); \
1676 COND_PUSH(set, CLBR_R9, r9); \
1677 COND_PUSH(set, CLBR_R10, r10); \
1678 COND_PUSH(set, CLBR_R11, r11)
1679 #define PV_RESTORE_REGS(set) \
1680 COND_POP(set, CLBR_R11, r11); \
1681 COND_POP(set, CLBR_R10, r10); \
1682 COND_POP(set, CLBR_R9, r9); \
1683 COND_POP(set, CLBR_R8, r8); \
1684 COND_POP(set, CLBR_RDI, rdi); \
1685 COND_POP(set, CLBR_RSI, rsi); \
1686 COND_POP(set, CLBR_RDX, rdx); \
1687 COND_POP(set, CLBR_RCX, rcx); \
1688 COND_POP(set, CLBR_RAX, rax)
1690 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
1691 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
1692 #define PARA_INDIRECT(addr) *addr(%rip)
1694 #define PV_SAVE_REGS(set) \
1695 COND_PUSH(set, CLBR_EAX, eax); \
1696 COND_PUSH(set, CLBR_EDI, edi); \
1697 COND_PUSH(set, CLBR_ECX, ecx); \
1698 COND_PUSH(set, CLBR_EDX, edx)
1699 #define PV_RESTORE_REGS(set) \
1700 COND_POP(set, CLBR_EDX, edx); \
1701 COND_POP(set, CLBR_ECX, ecx); \
1702 COND_POP(set, CLBR_EDI, edi); \
1703 COND_POP(set, CLBR_EAX, eax)
1705 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
1706 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
1707 #define PARA_INDIRECT(addr) *%cs:addr
1710 #define INTERRUPT_RETURN \
1711 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
1712 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
1714 #define DISABLE_INTERRUPTS(clobbers) \
1715 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
1716 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
1717 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
1718 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
1720 #define ENABLE_INTERRUPTS(clobbers) \
1721 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
1722 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
1723 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
1724 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
1726 #define USERGS_SYSRET32 \
1727 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
1729 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
1731 #ifdef CONFIG_X86_32
1732 #define GET_CR0_INTO_EAX \
1733 push %ecx; push %edx; \
1734 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
1737 #define ENABLE_INTERRUPTS_SYSEXIT \
1738 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
1740 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1743 #else /* !CONFIG_X86_32 */
1746 * If swapgs is used while the userspace stack is still current,
1747 * there's no way to call a pvop. The PV replacement *must* be
1748 * inlined, or the swapgs instruction must be trapped and emulated.
1750 #define SWAPGS_UNSAFE_STACK \
1751 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1755 * Note: swapgs is very special, and in practice is either going to be
1756 * implemented with a single "swapgs" instruction or something very
1757 * special. Either way, we don't need to save any registers for
1758 * it.
1759 */
1760 #define SWAPGS \
1761 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1762 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
1765 #define GET_CR2_INTO_RCX \
1766 call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
1770 #define PARAVIRT_ADJUST_EXCEPTION_FRAME \
1771 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
1773 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
1775 #define USERGS_SYSRET64 \
1776 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
1778 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
1780 #define ENABLE_INTERRUPTS_SYSEXIT32 \
1781 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
1783 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1784 #endif /* CONFIG_X86_32 */
1786 #endif /* __ASSEMBLY__ */
1787 #endif /* CONFIG_PARAVIRT */
1788 #endif /* _ASM_X86_PARAVIRT_H */