xen/smp: Move the common CPU init code a bit to prep for PVH patch.
author	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
	Mon, 22 Oct 2012 15:35:16 +0000 (11:35 -0400)
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
	Wed, 20 Feb 2013 02:59:47 +0000 (21:59 -0500)
The PV and PVH CPU init code share some functionality. The
PVH patch ("xen/pvh: Extend vcpu_guest_context, p2m, event, and XenBus")
sets up some of it, but not all. To make the code easier to read, this
patch moves the PV-specific parts out of the generic path.

No functional change - just code movement.
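
For illustration only, a rough sketch of how the block grouped below is
expected to be treated by the follow-up PVH series; the xen_pvh_domain()
guard is an assumption about that later patch, not part of this change:

	if (!xen_pvh_domain()) {
		/* PV-only pieces grouped by this patch: user segment
		 * selectors, trap table, GDT frames, kernel stack and the
		 * event/failsafe callbacks (see the hunk below).
		 */
		ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
		ctxt->user_regs.ds = __USER_DS;
		ctxt->user_regs.es = __USER_DS;
		xen_copy_trap_info(ctxt->trap_ctxt);
		/* ... remainder of the grouped block ... */
	}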

Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
[v2: Fixed compile errors noticed by Fengguang Wu's build system]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
arch/x86/xen/smp.c

index 34bc4cee8887b2c2d974c99ca81b78577f719ff9..09ea61d2e02f7f8993c1f08ab77d3d9460bb8447 100644
@@ -300,8 +300,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
        gdt = get_cpu_gdt_table(cpu);
 
        ctxt->flags = VGCF_IN_KERNEL;
-       ctxt->user_regs.ds = __USER_DS;
-       ctxt->user_regs.es = __USER_DS;
        ctxt->user_regs.ss = __KERNEL_DS;
 #ifdef CONFIG_X86_32
        ctxt->user_regs.fs = __KERNEL_PERCPU;
@@ -310,35 +308,41 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
        ctxt->gs_base_kernel = per_cpu_offset(cpu);
 #endif
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
-       ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
 
        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
 
-       xen_copy_trap_info(ctxt->trap_ctxt);
+       {
+               ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
+               ctxt->user_regs.ds = __USER_DS;
+               ctxt->user_regs.es = __USER_DS;
 
-       ctxt->ldt_ents = 0;
+               xen_copy_trap_info(ctxt->trap_ctxt);
 
-       BUG_ON((unsigned long)gdt & ~PAGE_MASK);
+               ctxt->ldt_ents = 0;
 
-       gdt_mfn = arbitrary_virt_to_mfn(gdt);
-       make_lowmem_page_readonly(gdt);
-       make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
+               BUG_ON((unsigned long)gdt & ~PAGE_MASK);
 
-       ctxt->gdt_frames[0] = gdt_mfn;
-       ctxt->gdt_ents      = GDT_ENTRIES;
+               gdt_mfn = arbitrary_virt_to_mfn(gdt);
+               make_lowmem_page_readonly(gdt);
+               make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
 
-       ctxt->user_regs.cs = __KERNEL_CS;
-       ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
+               ctxt->gdt_frames[0] = gdt_mfn;
+               ctxt->gdt_ents      = GDT_ENTRIES;
 
-       ctxt->kernel_ss = __KERNEL_DS;
-       ctxt->kernel_sp = idle->thread.sp0;
+               ctxt->kernel_ss = __KERNEL_DS;
+               ctxt->kernel_sp = idle->thread.sp0;
 
 #ifdef CONFIG_X86_32
-       ctxt->event_callback_cs     = __KERNEL_CS;
-       ctxt->failsafe_callback_cs  = __KERNEL_CS;
+               ctxt->event_callback_cs     = __KERNEL_CS;
+               ctxt->failsafe_callback_cs  = __KERNEL_CS;
 #endif
-       ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
-       ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;
+               ctxt->event_callback_eip    =
+                                       (unsigned long)xen_hypervisor_callback;
+               ctxt->failsafe_callback_eip =
+                                       (unsigned long)xen_failsafe_callback;
+       }
+       ctxt->user_regs.cs = __KERNEL_CS;
+       ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
 
        per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));