From: Linus Torvalds
Date: Thu, 12 Aug 2010 16:09:41 +0000 (-0700)
Subject: Merge branch 'stable/xen-swiotlb-0.8.6' of git://git.kernel.org/pub/scm/linux/kernel...
X-Git-Tag: firefly_0821_release~9833^2~808
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=26f0cf91813bdc8e61595f8ad6660251e2ee9cf6;p=firefly-linux-kernel-4.4.55.git

Merge branch 'stable/xen-swiotlb-0.8.6' of git://git./linux/kernel/git/konrad/xen

* 'stable/xen-swiotlb-0.8.6' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  x86: Detect whether we should use Xen SWIOTLB.
  pci-swiotlb-xen: Add glue code to setup dma_ops utilizing xen_swiotlb_* functions.
  swiotlb-xen: SWIOTLB library for Xen PV guest with PCI passthrough.
  xen/mmu: inhibit vmap aliases rather than trying to clear them out
  vmap: add flag to allow lazy unmap to be disabled at runtime
  xen: Add xen_create_contiguous_region
  xen: Rename the balloon lock
  xen: Allow unprivileged Xen domains to create iomap pages
  xen: use _PAGE_IOMAP in ioremap to do machine mappings

Fix up trivial conflicts (adding both xen swiotlb and xen pci platform
driver setup close to each other) in drivers/xen/{Kconfig,Makefile} and
include/xen/xen-ops.h
---

26f0cf91813bdc8e61595f8ad6660251e2ee9cf6
diff --cc arch/x86/xen/mmu.c
index 413b19b3d0fe,ef5728dde8f3..42086ac406af
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@@ -56,10 -59,11 +59,12 @@@
  #include 
  #include 
+ #include 
  #include 
  #include 
 +#include 
  #include 
+ #include 
  #include 
  #include 
  #include "multicalls.h"
@@@ -1940,42 -2024,206 +2025,240 @@@ void __init xen_init_mmu_ops(void)
  	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
  	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
  	pv_mmu_ops = xen_mmu_ops;
+
+ 	vmap_lazy_unmap = false;
+ }
+
+ /* Protected by xen_reservation_lock. */
+ #define MAX_CONTIG_ORDER	9 /* 2MB */
+ static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
+
+ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
+ 				 unsigned int address_bits)
+ {
+ 	unsigned long *in_frames = discontig_frames, out_frame;
+ 	unsigned long flags;
+ 	int success;
+
+ 	if (xen_feature(XENFEAT_auto_translated_physmap))
+ 		return 0;
+
+ 	if (unlikely(order > MAX_CONTIG_ORDER))
+ 		return -ENOMEM;
+
+ 	memset((void *) vstart, 0, PAGE_SIZE << order);
+
+ 	spin_lock_irqsave(&xen_reservation_lock, flags);
+
+ 	/* 1. Zap current PTEs, remembering MFNs. */
+ 	xen_zap_pfn_range(vstart, order, in_frames, NULL);
+
+ 	/* 2. Get a new contiguous memory extent. */
+ 	out_frame = virt_to_pfn(vstart);
+ 	success = xen_exchange_memory(1UL << order, 0, in_frames,
+ 				      1, order, &out_frame,
+ 				      address_bits);
+
+ 	/* 3. Map the new extent in place of old pages. */
+ 	if (success)
+ 		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
+ 	else
+ 		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
+
+ 	spin_unlock_irqrestore(&xen_reservation_lock, flags);
+
+ 	return success ? 0 : -ENOMEM;
+ }
+ EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
+
+ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
+ {
+ 	unsigned long *out_frames = discontig_frames, in_frame;
+ 	unsigned long flags;
+ 	int success;
+
+ 	if (xen_feature(XENFEAT_auto_translated_physmap))
+ 		return;
+
+ 	if (unlikely(order > MAX_CONTIG_ORDER))
+ 		return;
+
+ 	memset((void *) vstart, 0, PAGE_SIZE << order);
+
+ 	spin_lock_irqsave(&xen_reservation_lock, flags);
+
+ 	/* 1. Find start MFN of contiguous extent. */
+ 	in_frame = virt_to_mfn(vstart);
+
+ 	/* 2. Zap current PTEs. */
+ 	xen_zap_pfn_range(vstart, order, NULL, out_frames);
+
+ 	/* 3. Do the exchange for non-contiguous MFNs. */
+ 	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
+ 					0, out_frames, 0);
+
+ 	/* 4. Map new pages in place of old pages. */
+ 	if (success)
+ 		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
+ 	else
+ 		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
+
+ 	spin_unlock_irqrestore(&xen_reservation_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
 +
 +#ifdef CONFIG_XEN_PVHVM
 +static void xen_hvm_exit_mmap(struct mm_struct *mm)
 +{
 +	struct xen_hvm_pagetable_dying a;
 +	int rc;
 +
 +	a.domid = DOMID_SELF;
 +	a.gpa = __pa(mm->pgd);
 +	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
 +	WARN_ON_ONCE(rc < 0);
 +}
 +
 +static int is_pagetable_dying_supported(void)
 +{
 +	struct xen_hvm_pagetable_dying a;
 +	int rc = 0;
 +
 +	a.domid = DOMID_SELF;
 +	a.gpa = 0x00;
 +	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
 +	if (rc < 0) {
 +		printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
 +		return 0;
 +	}
 +	return 1;
 +}
 +
 +void __init xen_hvm_init_mmu_ops(void)
 +{
 +	if (is_pagetable_dying_supported())
 +		pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
 +}
 +#endif
 +
  #ifdef CONFIG_XEN_DEBUG_FS
  static struct dentry *d_mmu_debug;
diff --cc drivers/xen/Kconfig
index 0a8826936639,97199c2a64a0..60d71e9abe9f
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@@ -62,13 -62,8 +62,18 @@@ config XEN_SYS_HYPERVISOR
  	 virtual environment, /sys/hypervisor will still be present,
  	 but will have no xen contents.
  
 +config XEN_PLATFORM_PCI
 +	tristate "xen platform pci device driver"
 +	depends on XEN_PVHVM
 +	default m
 +	help
 +	  Driver for the Xen PCI Platform device: it is responsible for
 +	  initializing xenbus and grant_table when running in a Xen HVM
 +	  domain. As a consequence this driver is required to run any Xen PV
 +	  frontend on Xen HVM.
++
+ config SWIOTLB_XEN
+ 	def_bool y
+ 	depends on SWIOTLB
+ 
  endmenu
diff --cc drivers/xen/Makefile
index e392fb776af3,85f84cff8104..fcaf838f54be
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@@ -10,4 -10,4 +10,5 @@@ obj-$(CONFIG_XEN_BALLOON)	+= balloon.o
  obj-$(CONFIG_XEN_DEV_EVTCHN)	+= evtchn.o
  obj-$(CONFIG_XENFS)		+= xenfs/
  obj-$(CONFIG_XEN_SYS_HYPERVISOR)	+= sys-hypervisor.o
 +obj-$(CONFIG_XEN_PLATFORM_PCI)	+= platform-pci.o
+ obj-$(CONFIG_SWIOTLB_XEN)	+= swiotlb-xen.o
diff --cc include/xen/xen-ops.h
index 46bc81ef74c6,d789c937c48a..351f4051f6d8
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@@ -15,6 -14,10 +15,12 @@@ void xen_mm_unpin_all(void)
  void xen_timer_resume(void);
  void xen_arch_resume(void);
  
 +int xen_setup_shutdown_event(void);
 +
+ extern unsigned long *xen_contiguous_bitmap;
+ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
+ 				 unsigned int address_bits);
+
+ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
+ 
  #endif /* INCLUDE_XEN_OPS_H */
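
For reference, the two calls exported above are meant for code that needs
machine-contiguous memory in a PV guest, such as the Xen SWIOTLB support
merged here. The sketch below shows one plausible way a driver could use
them together with the standard page allocator; the example_* helpers are
hypothetical and not part of this merge, only xen_create_contiguous_region(),
xen_destroy_contiguous_region(), __get_free_pages() and free_pages() are
real interfaces.

#include <linux/gfp.h>
#include <xen/xen-ops.h>

/* Illustrative only: allocate 2^order pages and exchange their backing
 * MFNs for a machine-contiguous extent addressable with address_bits. */
static void *example_alloc_contiguous(unsigned int order,
				      unsigned int address_bits)
{
	unsigned long vstart = __get_free_pages(GFP_KERNEL, order);

	if (!vstart)
		return NULL;

	if (xen_create_contiguous_region(vstart, order, address_bits)) {
		free_pages(vstart, order);
		return NULL;
	}
	return (void *)vstart;
}

/* Illustrative only: undo the exchange before giving the pages back. */
static void example_free_contiguous(void *vaddr, unsigned int order)
{
	xen_destroy_contiguous_region((unsigned long)vaddr, order);
	free_pages((unsigned long)vaddr, order);
}

This mirrors the pattern a DMA buffer allocator would likely follow: grab
pseudo-physically contiguous pages first, then ask the hypervisor to make
them machine-contiguous below the required address limit, and reverse the
exchange on free.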