/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>

#include "xen-ops.h"
#include "vdso.h"
#include "mmu.h"
#include "p2m.h"
/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
#ifdef CONFIG_X86_64
extern asmlinkage void nmi(void);
#endif
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;
/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long	next_area_mfn;
	unsigned long	target_pfn;
	unsigned long	size;
	unsigned long	mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
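/*
 * Head of the list of remap buffers built by
 * xen_do_set_identity_and_remap_chunk() and consumed by xen_remap_memory().
 * INVALID_P2M_ENTRY means the list is empty.
 */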
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
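/*
 * Record a pseudo-physical range as extra memory: append it to (or extend)
 * an entry in xen_extra_mem[], reserve it in memblock and invalidate its
 * p2m entries so the range is not treated as present RAM until it is
 * populated later.
 */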
static void __init xen_add_extra_mem(u64 start, u64 size)
{
	unsigned long pfn;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size  = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);

	xen_max_p2m_pfn = PFN_DOWN(start + size);
	for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		if (WARN_ONCE(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
			continue;
		WARN_ONCE(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
			  pfn, mfn);
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}
/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(
	const struct e820entry *list, size_t map_size,
	unsigned long *min_pfn)
{
	const struct e820entry *entry;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		unsigned long s_pfn, e_pfn;

		if (entry->type != E820_RAM)
			continue;
		e_pfn = PFN_DOWN(entry->addr + entry->size);
		/* We only care about E820 after this */
		if (e_pfn < *min_pfn)
			continue;
		s_pfn = PFN_UP(entry->addr);
		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}
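/*
 * Return a single frame to the hypervisor. The hypercall reports the number
 * of extents actually released, i.e. 1 on success.
 */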
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.domid = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;
	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
	unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
	unsigned long *released)
{
	unsigned long len = 0;
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
		if (ret == 1) {
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
			len++;
		} else
			break;
	}

	/* Need to release pages first */
	*released += len;
	*identity += set_phys_range_identity(start_pfn, end_pfn);
}
/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};
	/* Update p2m entry. */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n", pfn, mfn);
		BUG();
	}
	/* Update m2p entry via the hypervisor. */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n", mfn, pfn);
		BUG();
	}
	/* Update kernel mapping, but not for highmem. */
	if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
		return;
	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n", mfn, pfn);
		BUG();
	}
}
/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned long ident_cnt = 0;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

	/* Don't use memory until remapped */
	memblock_reserve(PFN_PHYS(remap_pfn), PFN_PHYS(size));

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		ident_cnt += set_phys_range_identity(ident_pfn_iter,
			ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}
/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	const struct e820entry *list, size_t map_size, unsigned long start_pfn,
	unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
	unsigned long *identity, unsigned long *released)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			*identity += set_phys_range_identity(cur_pfn,
				cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(list, map_size,
						      &remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
				cur_pfn + left, nr_pages, identity, released);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
		*identity += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}
static unsigned long __init xen_set_identity_and_remap(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages,
	unsigned long *released)
{
	phys_addr_t start = 0;
	unsigned long identity = 0;
	unsigned long last_pfn = nr_pages;
	const struct e820entry *entry;
	unsigned long num_released = 0;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * remap the memory in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				last_pfn = xen_set_identity_and_remap_chunk(
						list, map_size, start_pfn,
						end_pfn, nr_pages, last_pfn,
						&identity, &num_released);
			start = end;
		}
	}

	*released = num_released;

	pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
	pr_info("Released %ld page(s)\n", num_released);

	return last_pfn;
}
/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn is remapped to which pfn) is contained
 * in the memory to be remapped itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows the chunks to be remapped in arbitrary
 * order while the resulting mapping stays independent of that order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			mfn = xen_remap_buf.mfns[i];
			xen_update_mem_tables(pfn, mfn);
			remapped++;
			pfn++;
		}
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			memblock_free(PFN_PHYS(pfn_s), PFN_PHYS(len));
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}

		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		memblock_free(PFN_PHYS(pfn_s), PFN_PHYS(len));

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}
static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;
	domid_t domid = DOMID_SELF;
	int ret;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, MAX_DOMAIN_PAGES);
}
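/*
 * Add a region to the kernel e820 map. RAM regions are trimmed inward to
 * whole pages so no partial page is ever reported as usable RAM.
 */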
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
	u64 end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((u64)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}
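/*
 * Convert UNUSABLE regions to RAM. Used for the initial domain, where the
 * machine memory map is used and Xen refuses to create 1:1 mappings of
 * UNUSABLE regions (see the comment in xen_memory_setup()).
 */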
void xen_ignore_unusable(struct e820entry *list, size_t map_size)
{
	struct e820entry *entry;
	unsigned int i;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		if (entry->type == E820_UNUSABLE)
			entry->type = E820_RAM;
	}
}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long last_pfn = 0;
	unsigned long extra_pages = 0;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable(map, memmap.nr_entries);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	last_pfn = xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
					      &xen_released_pages);

	extra_pages += xen_released_pages;

	if (last_pfn > max_pfn) {
		max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
		mem_end = PFN_PHYS(max_pfn);
	}

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);
	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 *
	 * PFNs above MAX_P2M_PFN are considered identity mapped as
	 * well.
	 */
	set_phys_range_identity(map[i-1].addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);
	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 * We tried to make the memblock_reserve more selective so
	 * that it would be clear what region is reserved. Sadly we ran
	 * into the problem wherein on a 64-bit hypervisor with a 32-bit
	 * initial domain, the pt_base has the cr3 value which is not
	 * necessarily where the pagetable starts! As Jan put it: "
	 * Actually, the adjustment turns out to be correct: The page
	 * tables for a 32-on-64 dom0 get allocated in the order "first L1",
	 * "first L2", "first L3", so the offset to the page table base is
	 * indeed 2. When reading xen/include/public/xen.h's comment
	 * very strictly, this is not a violation (since there nothing is said
	 * that the first thing in the page table space is pointed to by
	 * pt_base; I admit that this seems to be implied though, namely
	 * do I think that it is implied that the page table space is the
	 * range [pt_base, pt_base + nt_pt_frames), whereas that
	 * range here indeed is [pt_base - 2, pt_base - 2 + nt_pt_frames),
	 * which - without a priori knowledge - the kernel would have
	 * difficulty to figure out)." - so let's just fall back to the
	 * easy way and reserve the whole region.
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}
/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;
	struct xen_memory_map memmap;
	int i;
	int rc;

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc < 0)
		panic("No memory map (%d)\n", rc);

	sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);

	for (i = 0; i < memmap.nr_entries; i++)
		e820_add_region(map[i].addr, map[i].size, map[i].type);

	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	return "Xen";
}
/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	u32 *mask;

	/*
	 * This could be called before selected_vdso32 is initialized, so
	 * just fiddle with both possible images.  vdso_image_32_syscall
	 * can't be selected, since it only exists on 64-bit systems.
	 */
	mask = vdso_image_32_int80.data +
		vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = vdso_image_32_sysenter.data +
		vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}
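/*
 * Register an entry point of the given callback type with the hypervisor.
 * Events are masked while the callback runs (CALLBACKF_mask_events).
 */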
static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
void xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif
	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}
void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}
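/*
 * PV MMU specific setup: enable the VM assists used by the PV MMU and
 * register the event, failsafe and fast system call entry points. Only
 * called when the domain is not auto-translated (see xen_arch_setup()).
 */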
void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}
/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
	fiddle_vdso();
}