#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                             struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}
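
/*
 * Usage sketch (illustrative only, not part of the original header):
 * the wrapper has register in/out semantics, so the leaf number is
 * passed in via *eax and all four registers come back through the
 * pointers.
 *
 *	unsigned int eax = 1, ebx = 0, ecx = 0, edx = 0;
 *
 *	__cpuid(&eax, &ebx, &ecx, &edx);
 *	// edx now holds the leaf-1 feature bits
 */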

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}
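
/*
 * Example (a sketch, not kernel API usage): save, clear and restore
 * %db7.  Note the wrapper takes (value, reg) while the underlying op
 * takes (reg, value).
 *
 *	unsigned long dr7;
 *
 *	get_debugreg(dr7, 7);
 *	set_debugreg(0UL, 7);	// disable all hw breakpoints
 *	set_debugreg(dr7, 7);	// restore the saved value
 */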

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_rdmsr_regs(u32 *regs)
{
        return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

static inline int paravirt_wrmsr_regs(u32 *regs)
{
        return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)                  \
do {                                            \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while (0)

#define wrmsr(msr, val1, val2)                  \
do {                                            \
        paravirt_write_msr(msr, val1, val2);    \
} while (0)

#define rdmsrl(msr, val)                        \
do {                                            \
        int _err;                               \
        val = paravirt_read_msr(msr, &_err);    \
} while (0)

#define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                   \
({                                              \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        (*a) = (u32)_l;                         \
        (*b) = _l >> 32;                        \
        _err;                                   \
})
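
/*
 * Usage sketch: the plain forms assume the MSR exists; the _safe forms
 * return nonzero if the access faulted.  MSR_EFER is just an example
 * index from <asm/msr-index.h>.
 *
 *	u32 lo, hi;
 *
 *	if (rdmsr_safe(MSR_EFER, &lo, &hi) == 0)
 *		wrmsrl(MSR_EFER, ((u64)hi << 32) | lo);
 */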

#define rdmsr_safe_regs(regs)   paravirt_rdmsr_regs(regs)
#define wrmsr_safe_regs(regs)   paravirt_wrmsr_regs(regs)

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr(msr, &err);
        return err;
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        u32 gprs[8] = { 0 };
        int err;

        gprs[1] = msr;                  /* MSR number, goes in %ecx */
        gprs[7] = 0x9c5a203a;           /* AMD "passcode", goes in %edi */

        err = paravirt_rdmsr_regs(gprs);

        *p = gprs[0] | ((u64)gprs[2] << 32);

        return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
        u32 gprs[8] = { 0 };

        gprs[0] = (u32)val;
        gprs[1] = msr;
        gprs[2] = val >> 32;
        gprs[7] = 0x9c5a203a;           /* AMD "passcode", goes in %edi */

        return paravirt_wrmsr_regs(gprs);
}

static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)                             \
do {                                            \
        u64 _l = paravirt_read_tsc();           \
        low = (u32)_l;                          \
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())
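
/*
 * Usage sketch: read a cycle delta.  Only meaningful when the TSC is
 * stable across the measured region; shown purely to illustrate the
 * macros (do_work() is a hypothetical stand-in).
 *
 *	u64 t0, t1;
 *
 *	rdtscll(t0);
 *	do_work();
 *	rdtscll(t1);
 *	// t1 - t0 is the elapsed cycle count
 */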

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
        return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)                          \
do {                                                    \
        unsigned int __aux;                             \
        unsigned long long __val = paravirt_rdtscp(&__aux); \
        (low) = (u32)__val;                             \
        (high) = (u32)(__val >> 32);                    \
        (aux) = __aux;                                  \
} while (0)

#define rdtscpll(val, aux)                              \
do {                                                    \
        unsigned int __aux;                             \
        val = paravirt_rdtscp(&__aux);                  \
        (aux) = __aux;                                  \
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}
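
/*
 * Sketch: the _p port accessors pair an access with slow_down_io(),
 * along the lines of (simplified from the BUILDIO macro in <asm/io.h>):
 *
 *	static inline void outb_p(unsigned char value, int port)
 *	{
 *		outb(value, port);
 *		slow_down_io();
 *	}
 */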

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}
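
/*
 * Sketch: a typical caller flushes the local TLB entry and then asks
 * the other CPUs that share the mm to do the same (cf. the native
 * flush_tlb_page() path):
 *
 *	__flush_tlb_single(va);
 *	flush_tlb_others(mm_cpumask(mm), mm, va);
 */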

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp)
{
        PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pmd_t *pmdp)
{
        PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte);

        return ret;
}
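
/*
 * Sketch: __pte()/pte_val() are inverse wrap/unwrap hooks.  When
 * pteval_t is wider than long (32-bit PAE), the 64-bit value is split
 * across two 32-bit argument words, which is why both call forms exist.
 * (pfn here is a hypothetical page frame number, not a real variable.)
 *
 *	pte_t pte = __pte((pfn << PAGE_SHIFT) | _PAGE_PRESENT);
 *	pteval_t raw = pte_val(pte);	// recovers the encoded value
 */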

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
                                   val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd);

        return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
                         mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}
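
/*
 * Sketch of the transaction these two hooks bracket (cf. the
 * change_pte_range() pattern in mm/mprotect.c):
 *
 *	pte_t ptent = ptep_modify_prot_start(mm, addr, ptep);
 *	ptent = pte_modify(ptent, newprot);
 *	ptep_modify_prot_commit(mm, addr, ptep, ptent);
 */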

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t pmd)
{
        if (sizeof(pmdval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
                            native_pmd_val(pmd));
}
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
                                   val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
                                   val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
                                   pud.pud, (u64)pud.pud >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
                                   pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

#endif  /* PAGETABLE_LEVELS == 4 */

#endif  /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

void arch_flush_lazy_mmu_mode(void);
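
/*
 * Sketch: batching page-table updates under lazy MMU mode so a
 * hypervisor backend can coalesce them into fewer hypercalls:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 */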

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int arch_spin_is_locked(struct arch_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int arch_spin_is_contended(struct arch_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define arch_spin_is_contended  arch_spin_is_contended

static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
                                                  unsigned long flags)
{
        PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}
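
/*
 * Sketch: these are the arch-level lock backends; kernel code normally
 * reaches them through spin_lock()/spin_unlock() rather than calling
 * them directly.
 *
 *	arch_spin_lock(&lock);
 *	// ... critical section ...
 *	arch_spin_unlock(&lock);
 */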

#endif

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                                         \
        "push %rcx;"                                                    \
        "push %rdx;"                                                    \
        "push %rsi;"                                                    \
        "push %rdi;"                                                    \
        "push %r8;"                                                     \
        "push %r9;"                                                     \
        "push %r10;"                                                    \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS                                      \
        "pop %r11;"                                                     \
        "pop %r10;"                                                     \
        "pop %r9;"                                                      \
        "pop %r8;"                                                      \
        "pop %rdi;"                                                     \
        "pop %rsi;"                                                     \
        "pop %rdx;"                                                     \
        "pop %rcx;"

/* We save some registers, but saving all of them would be too much.
 * Instead we clobber all caller-saved registers except the argument
 * register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx", "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx", "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)                                 \
        extern typeof(func) __raw_callee_save_##func;                   \
        static void *__##func##__ __used = func;                        \
                                                                        \
        asm(".pushsection .text;"                                       \
            "__raw_callee_save_" #func ": "                             \
            PV_SAVE_ALL_CALLER_REGS                                     \
            "call " #func ";"                                           \
            PV_RESTORE_ALL_CALLER_REGS                                  \
            "ret;"                                                      \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                       \
        ((struct paravirt_callee_save) { func })
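
/*
 * Sketch: a backend wraps its C implementation in a thunk and installs
 * the thunked version (the pattern the Xen backend uses for save_fl):
 *
 *	static unsigned long xen_save_fl(void) { ... }
 *	PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(xen_save_fl);
 */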

static inline notrace unsigned long arch_local_save_flags(void)
{
        return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
        PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
        PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
        PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long f;

        f = arch_local_save_flags();
        arch_local_irq_disable();
        return f;
}
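
/*
 * Sketch: the usual pairing these primitives support (normally reached
 * via local_irq_save()/local_irq_restore()):
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	// ... work that must not be interrupted ...
 *	arch_local_irq_restore(flags);
 */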


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .align algn;                           \
         word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
         .short clobbers;                       \
        .popsection
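
/*
 * Each _PVSITE use emits `ops' inline and records a
 * struct paravirt_patch_site entry for it in .parainstructions: the
 * site address (word 771b), the patch type, the site length
 * (.byte 772b-771b) and the clobber mask, which apply_paravirt()
 * uses to rewrite the site at boot.
 */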


#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                                \
        push %ecx; push %edx;                           \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT                                       \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else   /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something equally
 * special.  Either way, we don't need to save any registers for it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )

#define GET_CR2_INTO_RAX                                \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define PARAVIRT_ADJUST_EXCEPTION_FRAME                                 \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
                  CLBR_NONE,                                            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32                                     \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif  /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */