/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/sysinfo.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

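/*
 * Function codes for the perform-topology-function (PTF) instruction:
 * PTF_HORIZONTAL and PTF_VERTICAL request a polarization change, while
 * PTF_CHECK queries whether a topology-change report is pending. See
 * ptf() below, which issues the instruction and returns its condition
 * code.
 */
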
struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;

static int topology_enabled = 1;
static DECLARE_WORK(topology_work, topology_work_fn);

/* topology_lock protects the socket and book linked lists */
static DEFINE_SPINLOCK(topology_lock);
static struct mask_info socket_info;
static struct mask_info book_info;

struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_copy(&mask, cpumask_of(cpu));
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
		return mask;
	for (; info; info = info->next) {
		if (cpumask_test_cpu(cpu, &info->mask))
			return info->mask;
	}
	return mask;
}

static cpumask_t cpu_thread_map(unsigned int cpu)
{
	cpumask_t mask;
	int i;

	cpumask_copy(&mask, cpumask_of(cpu));
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
		return mask;
	cpu -= cpu % (smp_cpu_mtid + 1);
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_present(cpu + i))
			cpumask_set_cpu(cpu + i, &mask);
	return mask;
}

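/*
 * Illustrative example (values assumed, not taken from this file):
 * with smp_cpu_mtid == 1, i.e. two hardware threads per core,
 * cpu_thread_map(5) first rounds cpu down to 4, the first thread of
 * the core, and then returns a mask containing CPUs 4 and 5, each
 * included only if present.
 */
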
static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
					  struct mask_info *book,
					  struct mask_info *socket,
					  int one_socket_per_cpu)
{
	unsigned int core;

	for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) {
		unsigned int rcore;
		int lcpu, i;

		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
		lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
		if (lcpu < 0)
			continue;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			cpu_topology[lcpu + i].book_id = book->id;
			cpu_topology[lcpu + i].core_id = rcore;
			cpu_topology[lcpu + i].thread_id = lcpu + i;
			cpumask_set_cpu(lcpu + i, &book->mask);
			cpumask_set_cpu(lcpu + i, &socket->mask);
			if (one_socket_per_cpu)
				cpu_topology[lcpu + i].socket_id = rcore;
			else
				cpu_topology[lcpu + i].socket_id = socket->id;
			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
		}
		if (one_socket_per_cpu)
			socket = socket->next;
	}
	return socket;
}

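/*
 * Note on the core numbering above: the TLE core mask is stored
 * big-endian, so bit positions are mirrored. As an illustration,
 * assuming TOPOLOGY_CORE_BITS == 64 and tl_core->origin == 0, a set
 * bit at position 63 denotes core 0, whose first hardware thread is
 * looked up at rcore << smp_cpu_mt_shift.
 */
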
static void clear_masks(void)
{
	struct mask_info *info;

	info = &socket_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}

static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_core *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

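/*
 * Topology-list entries are variable-sized: a nesting level (nl) of
 * zero denotes a CPU entry (struct topology_core), anything else a
 * container entry (struct topology_container), so advancing the
 * cursor must add the size of the concrete entry type.
 */
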
static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, book, socket, 0);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 1:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 0:
			socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

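/*
 * On z10 the topology list has only two levels: nl 1 entries are
 * books and nl 0 entries are cores. Each core is treated as its own
 * socket, which is why add_cpus_to_mask() is called above with
 * one_socket_per_cpu set and returns the advanced socket pointer.
 */
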
static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct cpuid cpu_id;

	spin_lock_irq(&topology_lock);
	get_cpu_id(&cpu_id);
	clear_masks();
	switch (cpu_id.machine) {
	case 0x2097:
	case 0x2098:
		__tl_to_masks_z10(info);
		break;
	default:
		__tl_to_masks_generic(info);
	}
	spin_unlock_irq(&topology_lock);
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
	mutex_unlock(&smp_cpu_state_mutex);
}

static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc)  : "cc");
	return rc;
}

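/*
 * The .insn above encodes the perform-topology-function (PTF)
 * instruction (opcode 0xb9a2) with the function code in %1; ipm/srl
 * extract the resulting condition code, so ptf() returns 0 on
 * success and a non-zero condition code otherwise. For PTF_CHECK a
 * non-zero result indicates a pending topology-change report.
 */
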
int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

static void update_cpu_masks(void)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&topology_lock, flags);
	for_each_possible_cpu(cpu) {
		cpu_topology[cpu].thread_mask = cpu_thread_map(cpu);
		cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
		cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
		if (!MACHINE_HAS_TOPOLOGY) {
			cpu_topology[cpu].thread_id = cpu;
			cpu_topology[cpu].core_id = cpu;
			cpu_topology[cpu].socket_id = cpu;
			cpu_topology[cpu].book_id = cpu;
		}
	}
	spin_unlock_irqrestore(&topology_lock, flags);
}

void store_topology(struct sysinfo_15_1_x *info)
{
	if (topology_max_mnest >= 3)
		stsi(info, 15, 1, 3);
	else
		stsi(info, 15, 1, 2);
}

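/*
 * Select the deepest supported nesting level for STSI 15.1.x: level
 * 3 includes book containers, level 2 stops at sockets. Machines
 * that support fewer than three nesting levels only provide 15.1.2.
 */
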
int arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	struct device *dev;
	int cpu;

	if (!MACHINE_HAS_TOPOLOGY) {
		update_cpu_masks();
		topology_update_polarization_simple();
		return 0;
	}
	store_topology(info);
	tl_to_masks(info);
	update_cpu_masks();
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return 1;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer =
	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);

static atomic_t topology_poll = ATOMIC_INIT(0);

static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + HZ / 10);
	else
		mod_timer(&topology_timer, jiffies + HZ * 60);
}

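/*
 * Polling backoff, illustrated: topology_expect_change() below adds
 * 60 to topology_poll, so the next 60 timer expiries are scheduled
 * HZ/10 apart (roughly six seconds of polling at 100ms intervals)
 * before the counter drains to zero and the interval decays back to
 * once per minute.
 */
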
void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

static int __init early_parse_topology(char *p)
{
	if (strncmp(p, "off", 3))
		return 0;
	topology_enabled = 0;
	return 0;
}
early_param("topology", early_parse_topology);

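/*
 * Example: booting with "topology=off" on the kernel command line
 * clears topology_enabled, so cpu_group_map() and cpu_thread_map()
 * degrade to single-CPU masks.
 */
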
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = alloc_bootmem_align(
			roundup_pow_of_two(sizeof(struct mask_info)),
			roundup_pow_of_two(sizeof(struct mask_info)));
		mask = mask->next;
	}
}

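/*
 * Worked example (hypothetical magnitudes): for a machine reporting
 * mnest == 2 and mag == { 0, 0, 0, 0, 3, 10 }, alloc_masks() sizes
 * the socket list (offset 1) as 10 * 3 == 30 masks and the book
 * list (offset 2) as 3 masks - an upper bound on the number of
 * containers the topology list may report at each level.
 */
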
void __init s390_init_cpu_topology(void)
{
	struct sysinfo_15_1_x *info;
	int i;

	if (!MACHINE_HAS_TOPOLOGY)
		return;
	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
		printk(KERN_CONT " %d", info->mag[i]);
	printk(KERN_CONT " / %d\n", info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
}

static int cpu_management;

static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(dispatching, 0644, dispatching_show,
		   dispatching_store);

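/*
 * Usage from userspace, for illustration:
 *
 *	echo 1 > /sys/devices/system/cpu/dispatching
 *
 * requests vertical polarization via PTF, while writing 0 switches
 * back to horizontal; reading the file returns the current mode.
 */
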
static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

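/*
 * For illustration, reading a CPU's polarization attribute:
 *
 *	cat /sys/devices/system/cpu/cpu0/polarization
 *
 * prints one of "horizontal", "vertical:low", "vertical:medium",
 * "vertical:high" or "unknown".
 */
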
static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}

const struct cpumask *cpu_thread_mask(int cpu)
{
	return &cpu_topology[cpu].thread_mask;
}

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_mask;
}

static const struct cpumask *cpu_book_mask(int cpu)
{
	return &cpu_topology[cpu].book_mask;
}

static struct sched_domain_topology_level s390_topology[] = {
	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

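/*
 * The s390_topology table above builds scheduling domains from the
 * smallest to the largest grouping: SMT (threads of a core), MC
 * (cores of a socket), BOOK (sockets of a book) and DIE (all CPUs);
 * the BOOK and DIE levels use the default topology flags.
 */
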
static int __init topology_init(void)
{
	if (MACHINE_HAS_TOPOLOGY)
		set_topology_timer();
	else
		topology_update_polarization_simple();
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);

static int __init early_topology_init(void)
{
	set_sched_topology(s390_topology);
	return 0;
}
early_initcall(early_topology_init);