1 #ifndef _ASM_X86_PARAVIRT_H
2 #define _ASM_X86_PARAVIRT_H
3 /* Various instructions on x86 need to be replaced for
4 * para-virtualization: those hooks are defined here. */
7 #include <asm/pgtable_types.h>
10 /* Bitmask of what can be clobbered: usually at least eax. */
12 #define CLBR_EAX (1 << 0)
13 #define CLBR_ECX (1 << 1)
14 #define CLBR_EDX (1 << 2)
15 #define CLBR_EDI (1 << 3)
#ifdef CONFIG_X86_32
/* CLBR_ANY should match all the registers the platform has.
 * For i386, that's just the four above. */
#define CLBR_ANY  ((1 << 4) - 1)
21 #define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
22 #define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
23 #define CLBR_SCRATCH (0)
#else /* CONFIG_X86_64 */
#define CLBR_RAX  CLBR_EAX
26 #define CLBR_RCX CLBR_ECX
27 #define CLBR_RDX CLBR_EDX
28 #define CLBR_RDI CLBR_EDI
29 #define CLBR_RSI (1 << 4)
30 #define CLBR_R8 (1 << 5)
31 #define CLBR_R9 (1 << 6)
32 #define CLBR_R10 (1 << 7)
33 #define CLBR_R11 (1 << 8)
35 #define CLBR_ANY ((1 << 9) - 1)
37 #define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
38 CLBR_RCX | CLBR_R8 | CLBR_R9)
39 #define CLBR_RET_REG (CLBR_RAX)
40 #define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
#include <asm/desc_defs.h>
#endif /* CONFIG_X86_32 */
45 #define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
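/*
 * Worked example (illustrative): on i386, CLBR_ARG_REGS | CLBR_SCRATCH
 * is eax|edx|ecx, and masking out CLBR_RET_REG (eax|edx) leaves just
 * ecx -- which is why the 32-bit callee-save thunk further down only
 * needs to preserve %ecx.  On x86-64 the same expression yields the six
 * argument registers plus r10/r11, since rax is not in the set to
 * begin with.
 */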
48 #include <linux/types.h>
49 #include <linux/cpumask.h>
50 #include <asm/kmap_types.h>
51 #include <asm/desc_defs.h>
62 * Wrapper type for pointers to code which uses the non-standard
63 * calling convention. See PV_CALL_SAVE_REGS_THUNK below.
65 struct paravirt_callee_save {
71 unsigned int kernel_rpl;
72 int shared_kernel_pmd;
79 * Patch may replace one of the defined code sequences with
80 * arbitrary code, subject to the same register constraints.
81 * This generally means the code is not free to clobber any
82 * registers other than EAX. The patch function should return
 * the number of bytes of code generated, since generic code pads
 * the rest with no-ops.
86 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
87 unsigned long addr, unsigned len);
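	/*
	 * Illustrative sketch (not part of this header): a minimal
	 * backend patch hook can simply defer to the generic patcher
	 * declared below, which handles the nop/identity cases and
	 * otherwise patches in a call to the target:
	 *
	 *	static unsigned my_patch(u8 type, u16 clobbers, void *ibuf,
	 *				 unsigned long addr, unsigned len)
	 *	{
	 *		return paravirt_patch_default(type, clobbers, ibuf,
	 *					      addr, len);
	 *	}
	 */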
89 /* Basic arch-specific setup */
90 void (*arch_setup)(void);
91 char *(*memory_setup)(void);
92 void (*post_allocator_init)(void);
94 /* Print a banner to identify the environment */
100 /* Set deferred update mode, used for batching operations. */
106 void (*time_init)(void);
	/* Get and set the time of day */
109 unsigned long (*get_wallclock)(void);
110 int (*set_wallclock)(unsigned long);
112 unsigned long long (*sched_clock)(void);
113 unsigned long (*get_tsc_khz)(void);
117 /* hooks for various privileged instructions */
118 unsigned long (*get_debugreg)(int regno);
119 void (*set_debugreg)(int regno, unsigned long value);
123 unsigned long (*read_cr0)(void);
124 void (*write_cr0)(unsigned long);
126 unsigned long (*read_cr4_safe)(void);
127 unsigned long (*read_cr4)(void);
128 void (*write_cr4)(unsigned long);
131 unsigned long (*read_cr8)(void);
132 void (*write_cr8)(unsigned long);
135 /* Segment descriptor handling */
136 void (*load_tr_desc)(void);
137 void (*load_gdt)(const struct desc_ptr *);
138 void (*load_idt)(const struct desc_ptr *);
139 void (*store_gdt)(struct desc_ptr *);
140 void (*store_idt)(struct desc_ptr *);
141 void (*set_ldt)(const void *desc, unsigned entries);
142 unsigned long (*store_tr)(void);
143 void (*load_tls)(struct thread_struct *t, unsigned int cpu);
145 void (*load_gs_index)(unsigned int idx);
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
149 void (*write_gdt_entry)(struct desc_struct *,
150 int entrynum, const void *desc, int size);
151 void (*write_idt_entry)(gate_desc *,
152 int entrynum, const gate_desc *gate);
153 void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
154 void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
156 void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
158 void (*set_iopl_mask)(unsigned mask);
160 void (*wbinvd)(void);
161 void (*io_delay)(void);
163 /* cpuid emulation, mostly so that caps bits can be disabled */
164 void (*cpuid)(unsigned int *eax, unsigned int *ebx,
165 unsigned int *ecx, unsigned int *edx);
	/* MSR, PMC and TSC operations.
	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
169 u64 (*read_msr)(unsigned int msr, int *err);
170 int (*rdmsr_regs)(u32 *regs);
171 int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
172 int (*wrmsr_regs)(u32 *regs);
174 u64 (*read_tsc)(void);
175 u64 (*read_pmc)(int counter);
176 unsigned long long (*read_tscp)(unsigned int *aux);
179 * Atomically enable interrupts and return to userspace. This
180 * is only ever used to return to 32-bit processes; in a
181 * 64-bit kernel, it's used for 32-on-64 compat processes, but
182 * never native 64-bit processes. (Jump, not call.)
184 void (*irq_enable_sysexit)(void);
187 * Switch to usermode gs and return to 64-bit usermode using
188 * sysret. Only used in 64-bit kernels to return to 64-bit
189 * processes. Usermode register state, including %rsp, must
190 * already be restored.
192 void (*usergs_sysret64)(void);
195 * Switch to usermode gs and return to 32-bit usermode using
196 * sysret. Used to return to 32-on-64 compat processes.
 * Other usermode register state, including %esp, must already
 * be restored.
200 void (*usergs_sysret32)(void);
	/* Normal iret.  Jump to this with the standard iret stack
	   frame set up. */
206 void (*swapgs)(void);
208 void (*start_context_switch)(struct task_struct *prev);
209 void (*end_context_switch)(struct task_struct *next);
213 void (*init_IRQ)(void);
216 * Get/set interrupt state. save_fl and restore_fl are only
217 * expected to use X86_EFLAGS_IF; all other bits
 * returned from save_fl are undefined, and may be ignored by
 * restore_fl.
 * NOTE: Callers of these functions expect the callee to preserve
 * more registers than the standard C calling convention does.
224 struct paravirt_callee_save save_fl;
225 struct paravirt_callee_save restore_fl;
226 struct paravirt_callee_save irq_disable;
227 struct paravirt_callee_save irq_enable;
	void (*safe_halt)(void);
	void (*halt)(void);
233 void (*adjust_exception_frame)(void);
238 #ifdef CONFIG_X86_LOCAL_APIC
239 void (*setup_boot_clock)(void);
	void (*setup_secondary_clock)(void);
#endif
242 void (*startup_ipi_hook)(int phys_apicid,
243 unsigned long start_eip,
244 unsigned long start_esp);
250 * Called before/after init_mm pagetable setup. setup_start
251 * may reset %cr3, and may pre-install parts of the pagetable;
 * pagetable setup is expected to preserve any existing
 * mapping.
255 void (*pagetable_setup_start)(pgd_t *pgd_base);
256 void (*pagetable_setup_done)(pgd_t *pgd_base);
258 unsigned long (*read_cr2)(void);
259 void (*write_cr2)(unsigned long);
261 unsigned long (*read_cr3)(void);
262 void (*write_cr3)(unsigned long);
 * Hooks for intercepting the creation/use/destruction of an
 * mm_struct.
268 void (*activate_mm)(struct mm_struct *prev,
269 struct mm_struct *next);
270 void (*dup_mmap)(struct mm_struct *oldmm,
271 struct mm_struct *mm);
272 void (*exit_mmap)(struct mm_struct *mm);
276 void (*flush_tlb_user)(void);
277 void (*flush_tlb_kernel)(void);
278 void (*flush_tlb_single)(unsigned long addr);
279 void (*flush_tlb_others)(const struct cpumask *cpus,
				 struct mm_struct *mm,
				 unsigned long va);
283 /* Hooks for allocating and freeing a pagetable top-level */
284 int (*pgd_alloc)(struct mm_struct *mm);
285 void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
288 * Hooks for allocating/releasing pagetable pages when they're
289 * attached to a pagetable
291 void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
292 void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
293 void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
294 void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
295 void (*release_pte)(unsigned long pfn);
296 void (*release_pmd)(unsigned long pfn);
297 void (*release_pud)(unsigned long pfn);
299 /* Pagetable manipulation functions */
300 void (*set_pte)(pte_t *ptep, pte_t pteval);
301 void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
302 pte_t *ptep, pte_t pteval);
303 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep);
306 void (*pte_update_defer)(struct mm_struct *mm,
307 unsigned long addr, pte_t *ptep);
	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep);
311 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
312 pte_t *ptep, pte_t pte);
314 struct paravirt_callee_save pte_val;
315 struct paravirt_callee_save make_pte;
317 struct paravirt_callee_save pgd_val;
318 struct paravirt_callee_save make_pgd;
320 #if PAGETABLE_LEVELS >= 3
321 #ifdef CONFIG_X86_PAE
322 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep);
325 void (*pmd_clear)(pmd_t *pmdp);
327 #endif /* CONFIG_X86_PAE */
329 void (*set_pud)(pud_t *pudp, pud_t pudval);
331 struct paravirt_callee_save pmd_val;
332 struct paravirt_callee_save make_pmd;
334 #if PAGETABLE_LEVELS == 4
335 struct paravirt_callee_save pud_val;
336 struct paravirt_callee_save make_pud;
	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
339 #endif /* PAGETABLE_LEVELS == 4 */
340 #endif /* PAGETABLE_LEVELS >= 3 */
342 #ifdef CONFIG_HIGHPTE
	void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif
346 struct pv_lazy_ops lazy_mode;
	/* Sometimes the physical address is a pfn, and sometimes it's
	   an mfn.  We can tell which is which from the index. */
352 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
353 phys_addr_t phys, pgprot_t flags);
358 int (*spin_is_locked)(struct raw_spinlock *lock);
359 int (*spin_is_contended)(struct raw_spinlock *lock);
360 void (*spin_lock)(struct raw_spinlock *lock);
361 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
362 int (*spin_trylock)(struct raw_spinlock *lock);
363 void (*spin_unlock)(struct raw_spinlock *lock);
366 /* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
369 struct paravirt_patch_template {
370 struct pv_init_ops pv_init_ops;
371 struct pv_time_ops pv_time_ops;
372 struct pv_cpu_ops pv_cpu_ops;
373 struct pv_irq_ops pv_irq_ops;
374 struct pv_apic_ops pv_apic_ops;
375 struct pv_mmu_ops pv_mmu_ops;
376 struct pv_lock_ops pv_lock_ops;
379 extern struct pv_info pv_info;
380 extern struct pv_init_ops pv_init_ops;
381 extern struct pv_time_ops pv_time_ops;
382 extern struct pv_cpu_ops pv_cpu_ops;
383 extern struct pv_irq_ops pv_irq_ops;
384 extern struct pv_apic_ops pv_apic_ops;
385 extern struct pv_mmu_ops pv_mmu_ops;
386 extern struct pv_lock_ops pv_lock_ops;
388 #define PARAVIRT_PATCH(x) \
389 (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
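/*
 * For example, PARAVIRT_PATCH(pv_irq_ops.irq_disable) evaluates to the
 * word-sized index of the irq_disable slot within struct
 * paravirt_patch_template, so a patch site's type number can always be
 * converted back to the offset of the pointer it calls through.
 */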
391 #define paravirt_type(op) \
392 [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
393 [paravirt_opptr] "i" (&(op))
394 #define paravirt_clobber(clobber) \
395 [paravirt_clobber] "i" (clobber)
398 * Generate some code, and mark it as patchable by the
399 * apply_paravirt() alternate instruction patcher.
401 #define _paravirt_alt(insn_string, type, clobber) \
402 "771:\n\t" insn_string "\n" "772:\n" \
403 ".pushsection .parainstructions,\"a\"\n" \
406 " .byte " type "\n" \
407 " .byte 772b-771b\n" \
408 " .short " clobber "\n" \
411 /* Generate patchable code, with the default asm parameters. */
412 #define paravirt_alt(insn_string) \
413 _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
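/*
 * Each expansion of _paravirt_alt() emits one record into the
 * .parainstructions section whose layout matches struct
 * paravirt_patch_site below: the address of the instruction (771b),
 * the type byte, the length byte (772b-771b) and the 16-bit clobber
 * mask.
 */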
415 /* Simple instruction patching code. */
416 #define DEF_NATIVE(ops, name, code) \
417 extern const char start_##ops##_##name[], end_##ops##_##name[]; \
418 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
420 unsigned paravirt_patch_nop(void);
421 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
422 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
423 unsigned paravirt_patch_ignore(unsigned len);
424 unsigned paravirt_patch_call(void *insnbuf,
425 const void *target, u16 tgt_clobbers,
426 unsigned long addr, u16 site_clobbers,
428 unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
429 unsigned long addr, unsigned len);
430 unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
431 unsigned long addr, unsigned len);
433 unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
434 const char *start, const char *end);
436 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
437 unsigned long addr, unsigned len);
439 int paravirt_disable_iospace(void);
442 * This generates an indirect call based on the operation type number.
443 * The type number, computed in PARAVIRT_PATCH, is derived from the
444 * offset into the paravirt_patch_template structure, and can therefore be
445 * freely converted back into a structure offset.
447 #define PARAVIRT_CALL "call *%c[paravirt_opptr];"
450 * These macros are intended to wrap calls through one of the paravirt
451 * ops structs, so that they can be later identified and patched at
454 * Normally, a call to a pv_op function is a simple indirect call:
455 * (pv_op_struct.operations)(args...).
457 * Unfortunately, this is a relatively slow operation for modern CPUs,
458 * because it cannot necessarily determine what the destination
459 * address is. In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
461 * ideally, patch an inline implementation into the callsite. (Direct
462 * calls are essentially free, because the call and return addresses
463 * are completely predictable.)
465 * For i386, these macros rely on the standard gcc "regparm(3)" calling
466 * convention, in which the first three arguments are placed in %eax,
467 * %edx, %ecx (in that order), and the remaining arguments are placed
468 * on the stack. All caller-save registers (eax,edx,ecx) are expected
469 * to be modified (either clobbered or used for return values).
 * x86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately is quite a few of them (%r8-%r11).
477 * The call instruction itself is marked by placing its start address
478 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
483 * Unfortunately there's no way to get gcc to generate the args setup
484 * for the call, and then allow the call itself to be generated by an
485 * inline asm. Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
489 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
490 * It could be extended to more arguments, but there would be little
491 * to be gained from that. For each number of arguments, there are
492 * the two VCALL and CALL variants for void and non-void functions.
494 * When there is a return value, the invoker of the macro must specify
495 * the return type. The macro then uses sizeof() on that type to
 * determine whether it's a 32- or 64-bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
499 * the return value size.
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
505 * Small structures are passed and returned in registers. The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must handle it themselves.
509 * These PVOP_* macros are only defined within this header. This
510 * means that all uses must be wrapped in inline functions. This also
511 * makes sure the incoming and outgoing types are always correct.
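/*
 * The dummy "reg = reg" self-initializations below only serve to
 * suppress compiler warnings about uninitialized variables; the
 * registers are really written by the asm as output operands.
 */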
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS				\
515 unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
516 #define PVOP_CALL_ARGS PVOP_VCALL_ARGS
518 #define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
519 #define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
520 #define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
522 #define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
524 #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
526 #define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
527 #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
529 #define EXTRA_CLOBBERS
530 #define VEXTRA_CLOBBERS
531 #else /* CONFIG_X86_64 */
532 #define PVOP_VCALL_ARGS \
533 unsigned long __edi = __edi, __esi = __esi, \
534 __edx = __edx, __ecx = __ecx
535 #define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
537 #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
538 #define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
539 #define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
540 #define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
542 #define PVOP_VCALL_CLOBBERS "=D" (__edi), \
543 "=S" (__esi), "=d" (__edx), \
545 #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
547 #define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
548 #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
550 #define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
551 #define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
552 #endif /* CONFIG_X86_32 */
554 #ifdef CONFIG_PARAVIRT_DEBUG
555 #define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)op)
#endif
560 #define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
565 PVOP_TEST_NULL(op); \
566 /* This is 32-bit specific, but is okay in 64-bit */ \
567 /* since this condition will never hold */ \
568 if (sizeof(rettype) > sizeof(unsigned long)) { \
570 paravirt_alt(PARAVIRT_CALL) \
573 : paravirt_type(op), \
574 paravirt_clobber(clbr), \
576 : "memory", "cc" extra_clbr); \
577 __ret = (rettype)((((u64)__edx) << 32) | __eax); \
580 paravirt_alt(PARAVIRT_CALL) \
583 : paravirt_type(op), \
584 paravirt_clobber(clbr), \
586 : "memory", "cc" extra_clbr); \
587 __ret = (rettype)__eax; \
592 #define __PVOP_CALL(rettype, op, pre, post, ...) \
593 ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
594 EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
596 #define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
597 ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
598 PVOP_CALLEE_CLOBBERS, , \
599 pre, post, ##__VA_ARGS__)
602 #define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
605 PVOP_TEST_NULL(op); \
607 paravirt_alt(PARAVIRT_CALL) \
610 : paravirt_type(op), \
611 paravirt_clobber(clbr), \
613 : "memory", "cc" extra_clbr); \
616 #define __PVOP_VCALL(op, pre, post, ...) \
617 ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
619 pre, post, ##__VA_ARGS__)
#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
		       PVOP_VCALLEE_CLOBBERS, ,				\
		       pre, post, ##__VA_ARGS__)
628 #define PVOP_CALL0(rettype, op) \
629 __PVOP_CALL(rettype, op, "", "")
630 #define PVOP_VCALL0(op) \
631 __PVOP_VCALL(op, "", "")
633 #define PVOP_CALLEE0(rettype, op) \
634 __PVOP_CALLEESAVE(rettype, op, "", "")
635 #define PVOP_VCALLEE0(op) \
636 __PVOP_VCALLEESAVE(op, "", "")
639 #define PVOP_CALL1(rettype, op, arg1) \
640 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
641 #define PVOP_VCALL1(op, arg1) \
642 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
644 #define PVOP_CALLEE1(rettype, op, arg1) \
645 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
646 #define PVOP_VCALLEE1(op, arg1) \
647 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
650 #define PVOP_CALL2(rettype, op, arg1, arg2) \
651 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
652 PVOP_CALL_ARG2(arg2))
653 #define PVOP_VCALL2(op, arg1, arg2) \
654 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
655 PVOP_CALL_ARG2(arg2))
657 #define PVOP_CALLEE2(rettype, op, arg1, arg2) \
658 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
659 PVOP_CALL_ARG2(arg2))
660 #define PVOP_VCALLEE2(op, arg1, arg2) \
661 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
662 PVOP_CALL_ARG2(arg2))
665 #define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
666 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
667 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
668 #define PVOP_VCALL3(op, arg1, arg2, arg3) \
669 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
670 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
/* The 4-argument case is the only place i386 and x86_64 differ: i386
 * must push the fourth argument on the stack, while x86_64 has
 * registers to spare. */
#ifdef CONFIG_X86_32
674 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
675 __PVOP_CALL(rettype, op, \
676 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
677 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
678 PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
679 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
681 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
682 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
683 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else /* CONFIG_X86_64 */
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
686 __PVOP_CALL(rettype, op, "", "", \
687 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
688 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
689 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
690 __PVOP_VCALL(op, "", "", \
691 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif /* CONFIG_X86_32 */
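/*
 * Illustrative use: PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2), as
 * in read_cr2() below, emits an indirect call through
 * pv_mmu_ops.read_cr2 wrapped in a .parainstructions record, with the
 * result taken from %eax/%rax.
 */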
695 static inline int paravirt_enabled(void)
697 return pv_info.paravirt_enabled;
700 static inline void load_sp0(struct tss_struct *tss,
701 struct thread_struct *thread)
703 PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
706 #define ARCH_SETUP pv_init_ops.arch_setup();
707 static inline unsigned long get_wallclock(void)
709 return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
712 static inline int set_wallclock(unsigned long nowtime)
714 return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
717 static inline void (*choose_time_init(void))(void)
719 return pv_time_ops.time_init;
722 /* The paravirtualized CPUID instruction. */
723 static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
724 unsigned int *ecx, unsigned int *edx)
726 PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
730 * These special macros can be used to get or set a debugging register
732 static inline unsigned long paravirt_get_debugreg(int reg)
734 return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
736 #define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
737 static inline void set_debugreg(unsigned long val, int reg)
739 PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
742 static inline void clts(void)
744 PVOP_VCALL0(pv_cpu_ops.clts);
747 static inline unsigned long read_cr0(void)
749 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
752 static inline void write_cr0(unsigned long x)
754 PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
757 static inline unsigned long read_cr2(void)
759 return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
762 static inline void write_cr2(unsigned long x)
764 PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
767 static inline unsigned long read_cr3(void)
769 return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
772 static inline void write_cr3(unsigned long x)
774 PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
777 static inline unsigned long read_cr4(void)
779 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
781 static inline unsigned long read_cr4_safe(void)
783 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
786 static inline void write_cr4(unsigned long x)
788 PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
792 static inline unsigned long read_cr8(void)
794 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
797 static inline void write_cr8(unsigned long x)
799 PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
803 static inline void raw_safe_halt(void)
805 PVOP_VCALL0(pv_irq_ops.safe_halt);
808 static inline void halt(void)
	/* Unlike raw_safe_halt(), halt() must not re-enable interrupts. */
	PVOP_VCALL0(pv_irq_ops.halt);
813 static inline void wbinvd(void)
815 PVOP_VCALL0(pv_cpu_ops.wbinvd);
818 #define get_kernel_rpl() (pv_info.kernel_rpl)
820 static inline u64 paravirt_read_msr(unsigned msr, int *err)
822 return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
825 static inline int paravirt_rdmsr_regs(u32 *regs)
827 return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
830 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
832 return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
835 static inline int paravirt_wrmsr_regs(u32 *regs)
837 return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
840 /* These should all do BUG_ON(_err), but our headers are too tangled. */
841 #define rdmsr(msr, val1, val2) \
844 u64 _l = paravirt_read_msr(msr, &_err); \
849 #define wrmsr(msr, val1, val2) \
851 paravirt_write_msr(msr, val1, val2); \
854 #define rdmsrl(msr, val) \
857 val = paravirt_read_msr(msr, &_err); \
860 #define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
861 #define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b)
863 /* rdmsr with exception handling */
864 #define rdmsr_safe(msr, a, b) \
867 u64 _l = paravirt_read_msr(msr, &_err); \
873 #define rdmsr_safe_regs(regs) paravirt_rdmsr_regs(regs)
874 #define wrmsr_safe_regs(regs) paravirt_wrmsr_regs(regs)
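/*
 * Example (hypothetical caller) -- probing an MSR that may not be
 * present; rdmsr_safe() evaluates to 0 on success or -EFAULT if the
 * access trapped:
 *
 *	int err;
 *	u32 lo, hi;
 *
 *	err = rdmsr_safe(msr, &lo, &hi);
 *	if (err)
 *		return err;
 */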
876 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
880 *p = paravirt_read_msr(msr, &err);
883 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
889 gprs[7] = 0x9c5a203a;
891 err = paravirt_rdmsr_regs(gprs);
893 *p = gprs[0] | ((u64)gprs[2] << 32);
898 static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
905 gprs[7] = 0x9c5a203a;
907 return paravirt_wrmsr_regs(gprs);
910 static inline u64 paravirt_read_tsc(void)
912 return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
915 #define rdtscl(low) \
917 u64 _l = paravirt_read_tsc(); \
921 #define rdtscll(val) (val = paravirt_read_tsc())
923 static inline unsigned long long paravirt_sched_clock(void)
925 return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
927 #define calibrate_tsc() (pv_time_ops.get_tsc_khz())
929 static inline unsigned long long paravirt_read_pmc(int counter)
931 return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
934 #define rdpmc(counter, low, high) \
936 u64 _l = paravirt_read_pmc(counter); \
941 static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
943 return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
946 #define rdtscp(low, high, aux) \
949 unsigned long __val = paravirt_rdtscp(&__aux); \
950 (low) = (u32)__val; \
951 (high) = (u32)(__val >> 32); \
955 #define rdtscpll(val, aux) \
957 unsigned long __aux; \
958 val = paravirt_rdtscp(&__aux); \
962 static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
964 PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
967 static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
969 PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
972 static inline void load_TR_desc(void)
974 PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
976 static inline void load_gdt(const struct desc_ptr *dtr)
978 PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
980 static inline void load_idt(const struct desc_ptr *dtr)
982 PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
984 static inline void set_ldt(const void *addr, unsigned entries)
986 PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
988 static inline void store_gdt(struct desc_ptr *dtr)
990 PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
992 static inline void store_idt(struct desc_ptr *dtr)
994 PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
996 static inline unsigned long paravirt_store_tr(void)
998 return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
1000 #define store_tr(tr) ((tr) = paravirt_store_tr())
1001 static inline void load_TLS(struct thread_struct *t, unsigned cpu)
1003 PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
1006 #ifdef CONFIG_X86_64
1007 static inline void load_gs_index(unsigned int gs)
1009 PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
1016 PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
1019 static inline void write_gdt_entry(struct desc_struct *dt, int entry,
1020 void *desc, int type)
1022 PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
1025 static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
1027 PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
1029 static inline void set_iopl_mask(unsigned mask)
1031 PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
1034 /* The paravirtualized I/O functions */
1035 static inline void slow_down_io(void)
1037 pv_cpu_ops.io_delay();
1038 #ifdef REALLY_SLOW_IO
1039 pv_cpu_ops.io_delay();
1040 pv_cpu_ops.io_delay();
1041 pv_cpu_ops.io_delay();
1045 #ifdef CONFIG_X86_LOCAL_APIC
1046 static inline void setup_boot_clock(void)
1048 PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
1051 static inline void setup_secondary_clock(void)
1053 PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
1057 static inline void paravirt_post_allocator_init(void)
1059 if (pv_init_ops.post_allocator_init)
1060 (*pv_init_ops.post_allocator_init)();
1063 static inline void paravirt_pagetable_setup_start(pgd_t *base)
1065 (*pv_mmu_ops.pagetable_setup_start)(base);
1068 static inline void paravirt_pagetable_setup_done(pgd_t *base)
1070 (*pv_mmu_ops.pagetable_setup_done)(base);
1074 static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
1075 unsigned long start_esp)
1077 PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
1078 phys_apicid, start_eip, start_esp);
1082 static inline void paravirt_activate_mm(struct mm_struct *prev,
1083 struct mm_struct *next)
1085 PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
1088 static inline void arch_dup_mmap(struct mm_struct *oldmm,
1089 struct mm_struct *mm)
1091 PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
1094 static inline void arch_exit_mmap(struct mm_struct *mm)
1096 PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
1099 static inline void __flush_tlb(void)
1101 PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
1103 static inline void __flush_tlb_global(void)
1105 PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
1107 static inline void __flush_tlb_single(unsigned long addr)
1109 PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
1112 static inline void flush_tlb_others(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long va)
1116 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
1119 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
1121 return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
1124 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1126 PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
1129 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1131 PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
1133 static inline void paravirt_release_pte(unsigned long pfn)
1135 PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
1138 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1140 PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
1143 static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
1144 unsigned long start, unsigned long count)
1146 PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
1148 static inline void paravirt_release_pmd(unsigned long pfn)
1150 PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
1153 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1155 PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
1157 static inline void paravirt_release_pud(unsigned long pfn)
1159 PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
1162 #ifdef CONFIG_HIGHPTE
1163 static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
1166 ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
1174 PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
1180 PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
1183 static inline pte_t __pte(pteval_t val)
1187 if (sizeof(pteval_t) > sizeof(long))
1188 ret = PVOP_CALLEE2(pteval_t,
1189 pv_mmu_ops.make_pte,
1190 val, (u64)val >> 32);
1192 ret = PVOP_CALLEE1(pteval_t,
1193 pv_mmu_ops.make_pte,
1196 return (pte_t) { .pte = ret };
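/*
 * The CALLEE2/CALLEE1 split above mirrors the comment on 64-bit
 * arguments: with PAE, a 64-bit pteval_t is passed to the pvop as two
 * adjacent 32-bit arguments (low, then high), while on x86-64 it fits
 * in a single register.  The same pattern repeats for pgd, pmd and pud
 * below.
 */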
1199 static inline pteval_t pte_val(pte_t pte)
1203 if (sizeof(pteval_t) > sizeof(long))
1204 ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
1205 pte.pte, (u64)pte.pte >> 32);
1207 ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
1213 static inline pgd_t __pgd(pgdval_t val)
1217 if (sizeof(pgdval_t) > sizeof(long))
1218 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
1219 val, (u64)val >> 32);
1221 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
1224 return (pgd_t) { ret };
1227 static inline pgdval_t pgd_val(pgd_t pgd)
1231 if (sizeof(pgdval_t) > sizeof(long))
1232 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
1233 pgd.pgd, (u64)pgd.pgd >> 32);
1235 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
1241 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
1247 ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
1250 return (pte_t) { .pte = ret };
1253 static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
1254 pte_t *ptep, pte_t pte)
1256 if (sizeof(pteval_t) > sizeof(long))
1258 pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
1260 PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
1261 mm, addr, ptep, pte.pte);
1264 static inline void set_pte(pte_t *ptep, pte_t pte)
1266 if (sizeof(pteval_t) > sizeof(long))
1267 PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
1268 pte.pte, (u64)pte.pte >> 32);
1270 PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
1274 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1275 pte_t *ptep, pte_t pte)
1277 if (sizeof(pteval_t) > sizeof(long))
1279 pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
1281 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
1284 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
1286 pmdval_t val = native_pmd_val(pmd);
1288 if (sizeof(pmdval_t) > sizeof(long))
1289 PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
1291 PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
1294 #if PAGETABLE_LEVELS >= 3
1295 static inline pmd_t __pmd(pmdval_t val)
1299 if (sizeof(pmdval_t) > sizeof(long))
1300 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
1301 val, (u64)val >> 32);
1303 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
1306 return (pmd_t) { ret };
1309 static inline pmdval_t pmd_val(pmd_t pmd)
1313 if (sizeof(pmdval_t) > sizeof(long))
1314 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
1315 pmd.pmd, (u64)pmd.pmd >> 32);
1317 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
1323 static inline void set_pud(pud_t *pudp, pud_t pud)
1325 pudval_t val = native_pud_val(pud);
1327 if (sizeof(pudval_t) > sizeof(long))
1328 PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
1329 val, (u64)val >> 32);
1331 PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
1334 #if PAGETABLE_LEVELS == 4
1335 static inline pud_t __pud(pudval_t val)
1339 if (sizeof(pudval_t) > sizeof(long))
1340 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
1341 val, (u64)val >> 32);
1343 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
1346 return (pud_t) { ret };
1349 static inline pudval_t pud_val(pud_t pud)
1353 if (sizeof(pudval_t) > sizeof(long))
1354 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
1355 pud.pud, (u64)pud.pud >> 32);
1357 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
1363 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
1365 pgdval_t val = native_pgd_val(pgd);
1367 if (sizeof(pgdval_t) > sizeof(long))
1368 PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
1369 val, (u64)val >> 32);
1371 PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
1375 static inline void pgd_clear(pgd_t *pgdp)
1377 set_pgd(pgdp, __pgd(0));
1380 static inline void pud_clear(pud_t *pudp)
1382 set_pud(pudp, __pud(0));
1385 #endif /* PAGETABLE_LEVELS == 4 */
1387 #endif /* PAGETABLE_LEVELS >= 3 */
1389 #ifdef CONFIG_X86_PAE
1390 /* Special-case pte-setting operations for PAE, which can't update a
1391 64-bit pte atomically */
1392 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1394 PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
1395 pte.pte, pte.pte >> 32);
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
1401 PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
1404 static inline void pmd_clear(pmd_t *pmdp)
1406 PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
1408 #else /* !CONFIG_X86_PAE */
1409 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
1417 set_pte_at(mm, addr, ptep, __pte(0));
1420 static inline void pmd_clear(pmd_t *pmdp)
1422 set_pmd(pmdp, __pmd(0));
1424 #endif /* CONFIG_X86_PAE */
1426 /* Lazy mode for batching updates / context switch */
1427 enum paravirt_lazy_mode {
1433 enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
1434 void paravirt_start_context_switch(struct task_struct *prev);
1435 void paravirt_end_context_switch(struct task_struct *next);
1437 void paravirt_enter_lazy_mmu(void);
1438 void paravirt_leave_lazy_mmu(void);
1440 #define __HAVE_ARCH_START_CONTEXT_SWITCH
1441 static inline void arch_start_context_switch(struct task_struct *prev)
1443 PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
1446 static inline void arch_end_context_switch(struct task_struct *next)
1448 PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
1451 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
1452 static inline void arch_enter_lazy_mmu_mode(void)
1454 PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
1457 static inline void arch_leave_lazy_mmu_mode(void)
1459 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
1462 void arch_flush_lazy_mmu_mode(void);
1464 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1465 phys_addr_t phys, pgprot_t flags)
1467 pv_mmu_ops.set_fixmap(idx, phys, flags);
1470 void _paravirt_nop(void);
1471 u32 _paravirt_ident_32(u32);
1472 u64 _paravirt_ident_64(u64);
1474 #define paravirt_nop ((void *)_paravirt_nop)
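/*
 * A backend leaves a hook as a no-op by pointing it at paravirt_nop,
 * or at _paravirt_ident_32/_paravirt_ident_64 for value-preserving
 * hooks such as pte_val/make_pte on native hardware; the patcher can
 * then patch such call sites down to nothing, or to a simple move.
 */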
1476 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
1478 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
1480 return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
1483 static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
1485 return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
1487 #define __raw_spin_is_contended __raw_spin_is_contended
1489 static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
1491 PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
1494 static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
1495 unsigned long flags)
1497 PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
1500 static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
1502 return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
1505 static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
1507 PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
1512 /* These all sit in the .parainstructions section to tell us what to patch. */
1513 struct paravirt_patch_site {
1514 u8 *instr; /* original instructions */
1515 u8 instrtype; /* type of this instruction */
1516 u8 len; /* length of original instruction */
1517 u16 clobbers; /* what registers you may clobber */
1520 extern struct paravirt_patch_site __parainstructions[],
1521 __parainstructions_end[];
1523 #ifdef CONFIG_X86_32
1524 #define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
1525 #define PV_RESTORE_REGS "popl %edx; popl %ecx;"
1527 /* save and restore all caller-save registers, except return value */
1528 #define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;"
1529 #define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;"
1531 #define PV_FLAGS_ARG "0"
1532 #define PV_EXTRA_CLOBBERS
1533 #define PV_VEXTRA_CLOBBERS
#else /* CONFIG_X86_64 */
/* save and restore all caller-save registers, except return value */
1536 #define PV_SAVE_ALL_CALLER_REGS \
1545 #define PV_RESTORE_ALL_CALLER_REGS \
/* We save some registers, but saving all of them would be too much.
 * Instead we clobber all caller-saved registers except the argument
 * register. */
1557 #define PV_SAVE_REGS "pushq %%rdi;"
1558 #define PV_RESTORE_REGS "popq %%rdi;"
1559 #define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
1560 #define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG	"D"
#endif /* CONFIG_X86_32 */
1565 * Generate a thunk around a function which saves all caller-save
1566 * registers except for the return value. This allows C functions to
1567 * be called from assembler code where fewer than normal registers are
1568 * available. It may also help code generation around calls from C
1569 * code if the common case doesn't use many registers.
1571 * When a callee is wrapped in a thunk, the caller can assume that all
1572 * arg regs and all scratch registers are preserved across the
 * call.  The return value in rax/eax will not be saved, even for void
 * functions.
1576 #define PV_CALLEE_SAVE_REGS_THUNK(func) \
1577 extern typeof(func) __raw_callee_save_##func; \
1578 static void *__##func##__ __used = func; \
1580 asm(".pushsection .text;" \
1581 "__raw_callee_save_" #func ": " \
1582 PV_SAVE_ALL_CALLER_REGS \
1584 PV_RESTORE_ALL_CALLER_REGS \
1588 /* Get a reference to a callee-save function */
1589 #define PV_CALLEE_SAVE(func) \
1590 ((struct paravirt_callee_save) { __raw_callee_save_##func })
1592 /* Promise that "func" already uses the right calling convention */
1593 #define __PV_IS_CALLEE_SAVE(func) \
1594 ((struct paravirt_callee_save) { func })
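/*
 * Typical usage (sketch; my_save_fl is a stand-in name for a backend's
 * flag-reading function):
 *
 *	static unsigned long my_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 * and then, in the ops structure initializer:
 *
 *	.save_fl = PV_CALLEE_SAVE(my_save_fl),
 */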
1596 static inline unsigned long __raw_local_save_flags(void)
1600 asm volatile(paravirt_alt(PARAVIRT_CALL)
1602 : paravirt_type(pv_irq_ops.save_fl),
1603 paravirt_clobber(CLBR_EAX)
1608 static inline void raw_local_irq_restore(unsigned long f)
1610 asm volatile(paravirt_alt(PARAVIRT_CALL)
1613 paravirt_type(pv_irq_ops.restore_fl),
1614 paravirt_clobber(CLBR_EAX)
1618 static inline void raw_local_irq_disable(void)
1620 asm volatile(paravirt_alt(PARAVIRT_CALL)
1622 : paravirt_type(pv_irq_ops.irq_disable),
1623 paravirt_clobber(CLBR_EAX)
1624 : "memory", "eax", "cc");
1627 static inline void raw_local_irq_enable(void)
1629 asm volatile(paravirt_alt(PARAVIRT_CALL)
1631 : paravirt_type(pv_irq_ops.irq_enable),
1632 paravirt_clobber(CLBR_EAX)
1633 : "memory", "eax", "cc");
1636 static inline unsigned long __raw_local_irq_save(void)
1640 f = __raw_local_save_flags();
1641 raw_local_irq_disable();
1646 /* Make sure as little as possible of this mess escapes. */
1647 #undef PARAVIRT_CALL
1661 #else /* __ASSEMBLY__ */
1663 #define _PVSITE(ptype, clobbers, ops, word, algn) \
1667 .pushsection .parainstructions,"a"; \
1676 #define COND_PUSH(set, mask, reg) \
1677 .if ((~(set)) & mask); push %reg; .endif
1678 #define COND_POP(set, mask, reg) \
1679 .if ((~(set)) & mask); pop %reg; .endif
1681 #ifdef CONFIG_X86_64
1683 #define PV_SAVE_REGS(set) \
1684 COND_PUSH(set, CLBR_RAX, rax); \
1685 COND_PUSH(set, CLBR_RCX, rcx); \
1686 COND_PUSH(set, CLBR_RDX, rdx); \
1687 COND_PUSH(set, CLBR_RSI, rsi); \
1688 COND_PUSH(set, CLBR_RDI, rdi); \
1689 COND_PUSH(set, CLBR_R8, r8); \
1690 COND_PUSH(set, CLBR_R9, r9); \
1691 COND_PUSH(set, CLBR_R10, r10); \
1692 COND_PUSH(set, CLBR_R11, r11)
1693 #define PV_RESTORE_REGS(set) \
1694 COND_POP(set, CLBR_R11, r11); \
1695 COND_POP(set, CLBR_R10, r10); \
1696 COND_POP(set, CLBR_R9, r9); \
1697 COND_POP(set, CLBR_R8, r8); \
1698 COND_POP(set, CLBR_RDI, rdi); \
1699 COND_POP(set, CLBR_RSI, rsi); \
1700 COND_POP(set, CLBR_RDX, rdx); \
1701 COND_POP(set, CLBR_RCX, rcx); \
1702 COND_POP(set, CLBR_RAX, rax)
1704 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
1705 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
1706 #define PARA_INDIRECT(addr) *addr(%rip)
#else /* CONFIG_X86_32 */
#define PV_SAVE_REGS(set)			\
1709 COND_PUSH(set, CLBR_EAX, eax); \
1710 COND_PUSH(set, CLBR_EDI, edi); \
1711 COND_PUSH(set, CLBR_ECX, ecx); \
1712 COND_PUSH(set, CLBR_EDX, edx)
1713 #define PV_RESTORE_REGS(set) \
1714 COND_POP(set, CLBR_EDX, edx); \
1715 COND_POP(set, CLBR_ECX, ecx); \
1716 COND_POP(set, CLBR_EDI, edi); \
1717 COND_POP(set, CLBR_EAX, eax)
1719 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
1720 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif /* CONFIG_X86_64 */
1724 #define INTERRUPT_RETURN \
1725 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
1726 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
1728 #define DISABLE_INTERRUPTS(clobbers) \
1729 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
1730 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
1731 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
1732 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
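/*
 * Worked example: with CLBR_ANY, PV_SAVE_REGS(CLBR_ANY | CLBR_CALLEE_SAVE)
 * saves nothing at all, since every register may be clobbered; with
 * CLBR_NONE it saves every register that the callee-save thunk does not
 * already guarantee to preserve.
 */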
1734 #define ENABLE_INTERRUPTS(clobbers) \
1735 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
1736 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
1737 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
1738 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
1740 #define USERGS_SYSRET32 \
1741 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
1743 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
1745 #ifdef CONFIG_X86_32
1746 #define GET_CR0_INTO_EAX \
1747 push %ecx; push %edx; \
1748 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
1751 #define ENABLE_INTERRUPTS_SYSEXIT \
1752 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
1754 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1757 #else /* !CONFIG_X86_32 */
1760 * If swapgs is used while the userspace stack is still current,
1761 * there's no way to call a pvop. The PV replacement *must* be
1762 * inlined, or the swapgs instruction must be trapped and emulated.
1764 #define SWAPGS_UNSAFE_STACK \
1765 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS								\
1775 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1776 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
1779 #define GET_CR2_INTO_RCX \
1780 call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
1784 #define PARAVIRT_ADJUST_EXCEPTION_FRAME \
1785 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
1787 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
1789 #define USERGS_SYSRET64 \
1790 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
1792 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
1794 #define ENABLE_INTERRUPTS_SYSEXIT32 \
1795 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
1797 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1798 #endif /* CONFIG_X86_32 */
1800 #endif /* __ASSEMBLY__ */
1801 #endif /* CONFIG_PARAVIRT */
1802 #endif /* _ASM_X86_PARAVIRT_H */