/*
 * arch/s390/kernel/smp.c
 *
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 *	      Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 * based on other smp stuff by
 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 * (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <asm/asm-offsets.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/cputime.h>

/* logical cpu to cpu address */
unsigned short __cpu_logical_map[NR_CPUS];

static struct task_struct *current_set[NR_CPUS];
static u8 smp_cpu_type;
static int smp_use_sigp_detection;

DEFINE_MUTEX(smp_cpu_state_mutex);
static int smp_cpu_state[NR_CPUS];

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static void smp_ext_bitcall(int, int);
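
/*
 * Sense the state of a CPU by its physical address via sigp; reports
 * whether that CPU is in the stopped or check stop state.
 */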
static int raw_cpu_stopped(int cpu)
	switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) {
	case sigp_status_stored:
		/* Check for stopped and check stop state */

static inline int cpu_stopped(int cpu)
	return raw_cpu_stopped(cpu_logical_map(cpu));

/*
 * Ensure that PSW restart is done on an online CPU
 */
void smp_restart_with_online_cpu(void)
	for_each_online_cpu(cpu) {
		if (stap() == __cpu_logical_map[cpu]) {
			/* We are online: Enable DAT again and return */
			__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);

	/* We are not online: Do PSW restart on an online CPU */
	while (sigp(cpu, sigp_restart) == sigp_busy)
	/* And stop ourself */
	while (raw_sigp(stap(), sigp_stop) == sigp_busy)
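
/*
 * Hand execution over to the IPL CPU (logical CPU 0): CPU 0 is stopped,
 * its registers are saved into a pt_regs frame on its panic stack, and
 * func is then run on that stack on CPU 0.
 */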
void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
	struct _lowcore *lc, *current_lc;
	struct stack_frame *sf;
	struct pt_regs *regs;

	if (smp_processor_id() == 0)
	__load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE |
			PSW_MASK_EA | PSW_MASK_BA);
	/* Disable lowcore protection */
	__ctl_clear_bit(0, 28);
	current_lc = lowcore_ptr[smp_processor_id()];
	lc->restart_psw.mask =
		PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
	lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
	smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
	while (sigp(0, sigp_stop_and_store_status) == sigp_busy)
	sp = lc->panic_stack;
	sp -= sizeof(struct pt_regs);
	regs = (struct pt_regs *) sp;
	memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
	regs->psw = current_lc->psw_save_area;
	sp -= STACK_FRAME_OVERHEAD;
	sf = (struct stack_frame *) sp;
	smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);

static void smp_stop_cpu(void)
	while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
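
/*
 * Stop all CPUs except the current one. If an oops is in progress the
 * other CPUs first get a chance to complete outstanding interrupts.
 */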
void smp_send_stop(void)
	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
	trace_hardirqs_off();

	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress) {
		/*
		 * Give the other cpus the opportunity to complete
		 * outstanding interrupts before stopping them.
		 */
		end = get_clock() + (1000000UL << 12);
		for_each_cpu(cpu, &cpumask) {
			set_bit(ec_stop_cpu, (unsigned long *)
				&lowcore_ptr[cpu]->ext_call_fast);
			while (sigp(cpu, sigp_emergency_signal) == sigp_busy &&
		while (get_clock() < end) {
			for_each_cpu(cpu, &cpumask)
				if (cpu_stopped(cpu))
					cpumask_clear_cpu(cpu, &cpumask);
			if (cpumask_empty(&cpumask))

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		while (sigp(cpu, sigp_stop) == sigp_busy)
		while (!cpu_stopped(cpu))

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(unsigned int ext_int_code,
				  unsigned int param32, unsigned long param64)
	if ((ext_int_code & 0xffff) == 0x1202)
		kstat_cpu(smp_processor_id()).irqs[EXTINT_EXC]++;
	else
		kstat_cpu(smp_processor_id()).irqs[EXTINT_EMS]++;
	/*
	 * handle bit signal external calls
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_stop_cpu, &bits))

	if (test_bit(ec_schedule, &bits))

	if (test_bit(ec_call_function, &bits))
		generic_smp_call_function_interrupt();

	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, int sig)
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	order = smp_vcpu_scheduled(cpu) ?
		sigp_external_call : sigp_emergency_signal;
	if (sigp(cpu, order) != sigp_busy)

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
	for_each_cpu(cpu, mask)
		smp_ext_bitcall(cpu, ec_call_function);

void arch_send_call_function_single_ipi(int cpu)
	smp_ext_bitcall(cpu, ec_call_function_single);

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)

void smp_ptlb_all(void)
	on_each_cpu(smp_ptlb_callback, NULL, 1);

EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
	smp_ext_bitcall(cpu, ec_schedule);

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1UL << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);

EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1UL << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);

EXPORT_SYMBOL(smp_ctl_clear_bit);

#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
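
/*
 * Collect the register save area of a CPU for zfcpdump/kdump: stop the
 * CPU, store its status and copy the save area from absolute lowcore.
 */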
static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
	if (ipl_info.type != IPL_TYPE_FCP_DUMP && !OLDMEM_BASE)
	if (is_kdump_kernel())
	if (cpu >= NR_CPUS) {
		pr_warning("CPU %i exceeds the maximum %i and is excluded from "
			   "the dump\n", cpu, NR_CPUS - 1);
	zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
	while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
	memcpy_real(zfcpdump_save_areas[cpu],
		    (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
		    sizeof(struct save_area));

struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP */
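
/* Check whether the physical CPU address cpu_id is already known. */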
static int cpu_known(int cpu_id)
	for_each_present_cpu(cpu) {
		if (__cpu_logical_map[cpu] == cpu_id)
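
/*
 * Scan all possible CPU addresses with sigp sense and add stopped CPUs
 * that are not yet known; used when no SCLP CPU information is available.
 */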
static int smp_rescan_cpus_sigp(cpumask_t avail)
	int cpu_id, logical_cpu;

	logical_cpu = cpumask_first(&avail);
	if (logical_cpu >= nr_cpu_ids)
	for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
		if (cpu_known(cpu_id))
		__cpu_logical_map[logical_cpu] = cpu_id;
		cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
		if (!cpu_stopped(logical_cpu))
		set_cpu_present(logical_cpu, true);
		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = cpumask_next(logical_cpu, &avail);
		if (logical_cpu >= nr_cpu_ids)
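
/* Add new CPUs based on the CPU information reported by the SCLP. */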
static int smp_rescan_cpus_sclp(cpumask_t avail)
	struct sclp_cpu_info *info;
	int cpu_id, logical_cpu, cpu;

	logical_cpu = cpumask_first(&avail);
	if (logical_cpu >= nr_cpu_ids)
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	rc = sclp_get_cpu_info(info);
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
		cpu_id = info->cpu[cpu].address;
		if (cpu_known(cpu_id))
		__cpu_logical_map[logical_cpu] = cpu_id;
		cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(logical_cpu, true);
		if (cpu >= info->configured)
			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
		else
			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = cpumask_next(logical_cpu, &avail);
		if (logical_cpu >= nr_cpu_ids)

static int __smp_rescan_cpus(void)
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	if (smp_use_sigp_detection)
		return smp_rescan_cpus_sigp(avail);
	else
		return smp_rescan_cpus_sclp(avail);
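
/*
 * Detect all CPUs present at boot: determine the boot CPU type, count
 * configured and standby CPUs and collect the dump save areas.
 */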
static void __init smp_detect_cpus(void)
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;
	u16 boot_cpu_addr, cpu_addr;

	boot_cpu_addr = __cpu_logical_map[0];
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE && !is_kdump_kernel()) {
		struct save_area *save_area;

		save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
		if (!save_area)
			panic("could not allocate memory for save area\n");
		copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
		zfcpdump_save_areas[0] = save_area;
#endif
	/* Use sigp detection algorithm if sclp doesn't work. */
	if (sclp_get_cpu_info(info)) {
		smp_use_sigp_detection = 1;
		for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
			if (cpu == boot_cpu_addr)
			if (!raw_cpu_stopped(cpu))
			smp_get_save_area(c_cpus, cpu);

	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address == boot_cpu_addr) {
				smp_cpu_type = info->cpu[cpu].type;

	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
		cpu_addr = info->cpu[cpu].address;
		if (cpu_addr == boot_cpu_addr)
		if (!raw_cpu_stopped(cpu_addr)) {
		smp_get_save_area(c_cpus, cpu_addr);

	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

/*
 * Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	__ctl_clear_bit(0, 28); /* Disable lowcore protection */
	S390_lowcore.restart_psw.mask =
		PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
	S390_lowcore.restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
	__ctl_set_bit(0, 28); /* Enable lowcore protection */
	/* cpu_idle will call schedule for us */

struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;

static void __cpuinit smp_fork_idle(struct work_struct *work)
	struct create_idle *c_idle;

	c_idle = container_of(work, struct create_idle, work);
	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
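
/*
 * Allocate and initialize lowcore, async stack, panic stack and per-cpu
 * vdso data for a CPU that is about to be brought online.
 */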
static int __cpuinit smp_alloc_lowcore(int cpu)
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;

	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	if (!panic_stack || !async_stack)
	memcpy(lowcore, &S390_lowcore, 512);
	memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
	lowcore->async_stack = async_stack + ASYNC_SIZE;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;
	lowcore->restart_psw.mask =
		PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
	lowcore->restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
	if (user_mode != HOME_SPACE_MODE)
		lowcore->restart_psw.mask |= PSW_ASC_HOME;
	if (MACHINE_HAS_IEEE) {
		unsigned long save_area;

		save_area = get_zeroed_page(GFP_KERNEL);
		lowcore->extended_save_area_addr = (u32) save_area;
	if (vdso_alloc_per_cpu(cpu, lowcore))
	lowcore_ptr[cpu] = lowcore;

	free_page(panic_stack);
	free_pages(async_stack, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, LC_ORDER);
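
/* Free the lowcore and stacks allocated by smp_alloc_lowcore(). */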
static void smp_free_lowcore(int cpu)
	struct _lowcore *lowcore;

	lowcore = lowcore_ptr[cpu];
	if (MACHINE_HAS_IEEE)
		free_page((unsigned long) lowcore->extended_save_area_addr);
	vdso_free_per_cpu(cpu, lowcore);
	free_page(lowcore->panic_stack - PAGE_SIZE);
	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, LC_ORDER);
	lowcore_ptr[cpu] = NULL;

/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu)
	struct _lowcore *cpu_lowcore;
	struct create_idle c_idle;
	struct task_struct *idle;
	struct stack_frame *sf;

	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
	idle = current_set[cpu];
	c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
	INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
	schedule_work(&c_idle.work);
	wait_for_completion(&c_idle.done);
	if (IS_ERR(c_idle.idle))
		return PTR_ERR(c_idle.idle);
	current_set[cpu] = c_idle.idle;
	init_idle(idle, cpu);
	if (smp_alloc_lowcore(cpu))
	do {
		ccode = sigp(cpu, sigp_initial_cpu_reset);
		if (ccode == sigp_busy)
		if (ccode == sigp_not_operational)
	} while (ccode == sigp_busy);

	lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
	while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)

	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->gpregs_save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
	atomic_inc(&init_mm.context.attach_count);
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_nr = cpu;
	cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
	cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
	cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
	memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list,
	while (sigp(cpu, sigp_restart) == sigp_busy)
	while (!cpu_online(cpu))

	smp_free_lowcore(cpu);
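
/* Handle the early "possible_cpus=" kernel command line parameter. */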
static int __init setup_possible_cpus(char *s)
	pcpus = simple_strtoul(s, NULL, 0);
	init_cpu_possible(cpumask_of(0));
	for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);

early_param("possible_cpus", setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);

	/* Disable pfault pseudo page faults on this cpu. */
	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
				1 << 10 | 1 << 9 | 1 << 6 | 1 << 5 |
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |

	smp_ctl_bit_callback(&cr_parms);

void __cpu_die(unsigned int cpu)
	/* Wait until target cpu is down */
	while (!cpu_stopped(cpu))
	while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
	smp_free_lowcore(cpu);
	atomic_dec(&init_mm.context.attach_count);

void __noreturn cpu_die(void)
	while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)

#endif /* CONFIG_HOTPLUG_CPU */
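
/*
 * Prepare SMP boot: register the emergency signal and external call
 * interrupt handlers and reallocate the boot CPU's lowcore and stacks.
 */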
void __init smp_prepare_cpus(unsigned int max_cpus)
	unsigned long save_area = 0;
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1202");

	/* Reallocate current lowcore, but keep its contents. */
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	BUG_ON(!lowcore || !panic_stack || !async_stack);
	if (MACHINE_HAS_IEEE)
		save_area = get_zeroed_page(GFP_KERNEL);
	local_mcck_disable();
	lowcore_ptr[smp_processor_id()] = lowcore;
	*lowcore = S390_lowcore;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;
	lowcore->async_stack = async_stack + ASYNC_SIZE;
	if (MACHINE_HAS_IEEE)
		lowcore->extended_save_area_addr = (u32) save_area;
	set_prefix((u32)(unsigned long) lowcore);
	if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))

void __init smp_prepare_boot_cpu(void)
	BUG_ON(smp_processor_id() != 0);

	current_thread_info()->cpu = 0;
	set_cpu_present(0, true);
	set_cpu_online(0, true);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
	cpu_set_polarization(0, POLARIZATION_UNKNOWN);

void __init smp_cpus_done(unsigned int max_cpus)

void __init smp_setup_processor_id(void)
	S390_lowcore.cpu_nr = 0;
	__cpu_logical_map[0] = stap();

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
	mutex_unlock(&smp_cpu_state_mutex);

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
	if (sscanf(buf, "%d %c", &val, &delim) != 1)
	if (val != 0 && val != 1)
	mutex_lock(&smp_cpu_state_mutex);

	/* disallow configuration changes of online cpus and cpu 0 */
	if (cpu_online(cpu) || cpu == 0)

	if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
		rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
		smp_cpu_state[cpu] = CPU_STATE_STANDBY;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();

	if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
		rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
		smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();

	mutex_unlock(&smp_cpu_state_mutex);
	return rc ? rc : count;

static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);

static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,

static ssize_t show_capability(struct device *dev,
			       struct device_attribute *attr, char *buf)
	unsigned int capability;

	rc = get_cpu_capability(&capability);
	return sprintf(buf, "%u\n", capability);

static DEVICE_ATTR(capability, 0444, show_capability, NULL);
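
/*
 * The idle counters are read under a sequence count so that a consistent
 * snapshot is returned without taking a lock.
 */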
static ssize_t show_idle_count(struct device *dev,
			       struct device_attribute *attr, char *buf)
	struct s390_idle_data *idle;
	unsigned long long idle_count;
	unsigned int sequence;

	idle = &per_cpu(s390_idle, dev->id);
	sequence = idle->sequence;
	idle_count = idle->idle_count;
	if (idle->idle_enter)
	if (idle->sequence != sequence)
	return sprintf(buf, "%llu\n", idle_count);

static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct device *dev,
			      struct device_attribute *attr, char *buf)
	struct s390_idle_data *idle;
	unsigned long long now, idle_time, idle_enter;
	unsigned int sequence;

	idle = &per_cpu(s390_idle, dev->id);
	sequence = idle->sequence;
	idle_time = idle->idle_time;
	idle_enter = idle->idle_enter;
	if (idle_enter != 0ULL && idle_enter < now)
		idle_time += now - idle_enter;
	if (idle->sequence != sequence)
	return sprintf(buf, "%llu\n", idle_time >> 12);

static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_capability.attr,
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,

static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct s390_idle_data *idle;

	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		memset(idle, 0, sizeof(struct s390_idle_data));
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return notifier_from_errno(err);

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,

static int __devinit smp_add_present_cpu(int cpu)
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
	rc = topology_cpu_init(c);

	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
#ifdef CONFIG_HOTPLUG_CPU

#ifdef CONFIG_HOTPLUG_CPU
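
/*
 * Rescan for new CPUs and register every CPU that newly appeared in the
 * present mask with the device core.
 */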
int __ref smp_rescan_cpus(void)
	mutex_lock(&smp_cpu_state_mutex);
	cpumask_copy(&newcpus, cpu_present_mask);
	rc = __smp_rescan_cpus();
	cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
	for_each_cpu(cpu, &newcpus) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			set_cpu_present(cpu, false);
	mutex_unlock(&smp_cpu_state_mutex);
	if (!cpumask_empty(&newcpus))
		topology_schedule_update();

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
	rc = smp_rescan_cpus();
	return rc ? rc : count;

static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */

static int __init s390_smp_init(void)
	register_cpu_notifier(&smp_cpu_nb);
#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);

subsys_initcall(s390_smp_init);