/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>

#define SAMPLE_COUNT            3

#define BYT_RATIOS      0x66a
#define BYT_VIDS        0x66b

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

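/*
 * Fixed-point helpers: values carry FRAC_BITS (8) fractional bits, so
 * int_tofp(1) == 256.  A worked example: mul_fp(int_tofp(3), int_tofp(2))
 * computes (768 * 512) >> 8 == 1536 == int_tofp(6); the 64-bit
 * intermediate keeps the product from overflowing before the shift
 * back down.
 */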
static inline int32_t mul_fp(int32_t x, int32_t y)
{
        return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(int32_t x, int32_t y)
{
        return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
}

struct sample {
        int32_t core_pct_busy;
        u64 aperf;
        u64 mperf;
        unsigned long long tsc;
        int freq;
};

struct pstate_data {
        int     current_pstate;
        int     min_pstate;
        int     max_pstate;
        int     turbo_pstate;
};

struct vid_data {
        int32_t min;
        int32_t max;
        int32_t ratio;
};

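/*
 * Per-CPU state for the proportional-integral-derivative controller
 * that steers the P state.  The gains, accumulated error and last
 * error are all kept in the FRAC_BITS fixed-point format above.
 */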
struct _pid {
        int setpoint;
        int32_t integral;
        int32_t p_gain;
        int32_t i_gain;
        int32_t d_gain;
        int deadband;
        int32_t last_err;
};

struct cpudata {
        int cpu;

        char name[64];

        struct timer_list timer;

        struct pstate_data pstate;
        struct vid_data vid;
        struct _pid pid;

        u64     prev_aperf;
        u64     prev_mperf;
        unsigned long long prev_tsc;
        int     sample_ptr;
        struct sample samples[SAMPLE_COUNT];
};

static struct cpudata **all_cpu_data;
struct pstate_adjust_policy {
        int sample_rate_ms;
        int deadband;
        int setpoint;
        int p_gain_pct;
        int d_gain_pct;
        int i_gain_pct;
};

struct pstate_funcs {
        int (*get_max)(void);
        int (*get_min)(void);
        int (*get_turbo)(void);
        void (*set)(struct cpudata*, int pstate);
        void (*get_vid)(struct cpudata *);
};

struct cpu_defaults {
        struct pstate_adjust_policy pid_policy;
        struct pstate_funcs funcs;
};

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;

struct perf_limits {
        int no_turbo;
        int max_perf_pct;
        int min_perf_pct;
        int32_t max_perf;
        int32_t min_perf;
        int max_policy_pct;
        int max_sysfs_pct;
};

static struct perf_limits limits = {
        .no_turbo = 0,
        .max_perf_pct = 100,
        .max_perf = int_tofp(1),
        .min_perf_pct = 0,
        .min_perf = 0,
        .max_policy_pct = 100,
        .max_sysfs_pct = 100,
};

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
                        int deadband, int integral)
{
        pid->setpoint = setpoint;
        pid->deadband  = deadband;
        pid->integral  = int_tofp(integral);
        pid->last_err  = setpoint - busy;
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
        pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
        pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
        pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}

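/*
 * Classic discrete PID step: with error e = setpoint - busy (in fixed
 * point), the output is p_gain * e + i_gain * sum(e) + d_gain *
 * (e - last_err).  Errors inside the deadband are ignored, and the
 * integral term is clamped to +/-30 to limit windup when the load
 * changes abruptly.
 */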
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
        signed int result;
        int32_t pterm, dterm, fp_error;
        int32_t integral_limit;

        fp_error = int_tofp(pid->setpoint) - busy;

        if (abs(fp_error) <= int_tofp(pid->deadband))
                return 0;

        pterm = mul_fp(pid->p_gain, fp_error);

        pid->integral += fp_error;

        /* limit the integral term */
        integral_limit = int_tofp(30);
        if (pid->integral > integral_limit)
                pid->integral = integral_limit;
        if (pid->integral < -integral_limit)
                pid->integral = -integral_limit;

        dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
        pid->last_err = fp_error;

        result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;

        return (signed int)fp_toint(result);
}

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
        pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
        pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
        pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

        pid_reset(&cpu->pid,
                pid_params.setpoint,
                100,
                pid_params.deadband,
                0);
}

static inline void intel_pstate_reset_all_pid(void)
{
        unsigned int cpu;
        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu])
                        intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
        }
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
        *(u32 *)data = val;
        intel_pstate_reset_all_pid();
        return 0;
}
static int pid_param_get(void *data, u64 *val)
{
        *val = *(u32 *)data;
        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
                        pid_param_set, "%llu\n");

struct pid_param {
        char *name;
        void *value;
};

static struct pid_param pid_files[] = {
        {"sample_rate_ms", &pid_params.sample_rate_ms},
        {"d_gain_pct", &pid_params.d_gain_pct},
        {"i_gain_pct", &pid_params.i_gain_pct},
        {"deadband", &pid_params.deadband},
        {"setpoint", &pid_params.setpoint},
        {"p_gain_pct", &pid_params.p_gain_pct},
        {NULL, NULL}
};

static struct dentry *debugfs_parent;
static void intel_pstate_debug_expose_params(void)
{
        int i = 0;

        debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
        if (IS_ERR_OR_NULL(debugfs_parent))
                return;
        while (pid_files[i].name) {
                debugfs_create_file(pid_files[i].name, 0660,
                                debugfs_parent, pid_files[i].value,
                                &fops_pid_param);
                i++;
        }
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)                                     \
        static ssize_t show_##file_name                                 \
        (struct kobject *kobj, struct attribute *attr, char *buf)       \
        {                                                               \
                return sprintf(buf, "%u\n", limits.object);             \
        }

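/*
 * The store_* handlers below back the global limit knobs exposed under
 * /sys/devices/system/cpu/intel_pstate: no_turbo, max_perf_pct and
 * min_perf_pct.  max_perf_pct is the smaller of the cpufreq policy
 * limit and the last value written via sysfs, so either interface can
 * only tighten the cap.
 */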
static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
                                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;
        limits.no_turbo = clamp_t(int, input, 0, 1);

        return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
                                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
        limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
        return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
                                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;
        limits.min_perf_pct = clamp_t(int, input, 0, 100);
        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

        return count;
}

show_one(no_turbo, no_turbo);
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);

static struct attribute *intel_pstate_attributes[] = {
        &no_turbo.attr,
        &max_perf_pct.attr,
        &min_perf_pct.attr,
        NULL
};

static struct attribute_group intel_pstate_attr_group = {
        .attrs = intel_pstate_attributes,
};
static struct kobject *intel_pstate_kobject;

static void intel_pstate_sysfs_expose_params(void)
{
        int rc;

        intel_pstate_kobject = kobject_create_and_add("intel_pstate",
                                                &cpu_subsys.dev_root->kobj);
        BUG_ON(!intel_pstate_kobject);
        rc = sysfs_create_group(intel_pstate_kobject,
                                &intel_pstate_attr_group);
        BUG_ON(rc);
}

/************************** sysfs end ************************/
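
/*
 * Bay Trail keeps its ratio and voltage ID ranges in dedicated MSRs
 * (0x66a and 0x66b).  As used here, BYT_RATIOS carries the minimum
 * ratio in bits 7:0 and the maximum in bits 23:16, and BYT_VIDS
 * carries the matching 7-bit VIDs in bits 14:8 and 22:16.
 */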
static int byt_get_min_pstate(void)
{
        u64 value;
        rdmsrl(BYT_RATIOS, value);
        return value & 0xFF;
}

static int byt_get_max_pstate(void)
{
        u64 value;
        rdmsrl(BYT_RATIOS, value);
        return (value >> 16) & 0xFF;
}

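/*
 * Bay Trail must program a voltage ID along with the ratio.  The VID
 * is linearly interpolated in fixed point between vid.min (at
 * min_pstate) and vid.max (at max_pstate) using the precomputed
 * vid.ratio slope, then packed into MSR_IA32_PERF_CTL next to the
 * requested ratio in bits 15:8; bit 32 disengages turbo when no_turbo
 * is set.
 */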
static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
        u64 val;
        int32_t vid_fp;
        u32 vid;

        val = pstate << 8;
        if (limits.no_turbo)
                val |= (u64)1 << 32;

        vid_fp = cpudata->vid.min + mul_fp(
                int_tofp(pstate - cpudata->pstate.min_pstate),
                cpudata->vid.ratio);

        vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
        vid = fp_toint(vid_fp);

        val |= vid;

        wrmsrl(MSR_IA32_PERF_CTL, val);
}

static void byt_get_vid(struct cpudata *cpudata)
{
        u64 value;

        rdmsrl(BYT_VIDS, value);
        cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
        cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
        cpudata->vid.ratio = div_fp(
                cpudata->vid.max - cpudata->vid.min,
                int_tofp(cpudata->pstate.max_pstate -
                        cpudata->pstate.min_pstate));
}

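/*
 * Core parts publish their ratio range in MSR_PLATFORM_INFO: the
 * maximum efficiency (minimum) ratio lives in bits 47:40 and the
 * maximum non-turbo ratio in bits 15:8.  The one-core turbo ratio is
 * read from bits 7:0 of MSR_NHM_TURBO_RATIO_LIMIT and is never
 * reported as lower than the non-turbo maximum.
 */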
static int core_get_min_pstate(void)
{
        u64 value;
        rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 40) & 0xFF;
}

static int core_get_max_pstate(void)
{
        u64 value;
        rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 8) & 0xFF;
}

static int core_get_turbo_pstate(void)
{
        u64 value;
        int nont, ret;
        rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
        nont = core_get_max_pstate();
        ret = value & 255;
        if (ret <= nont)
                ret = nont;
        return ret;
}

static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
        u64 val;

        val = pstate << 8;
        if (limits.no_turbo)
                val |= (u64)1 << 32;

        wrmsrl(MSR_IA32_PERF_CTL, val);
}

static struct cpu_defaults core_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
                .setpoint = 97,
                .p_gain_pct = 20,
                .d_gain_pct = 0,
                .i_gain_pct = 0,
        },
        .funcs = {
                .get_max = core_get_max_pstate,
                .get_min = core_get_min_pstate,
                .get_turbo = core_get_turbo_pstate,
                .set = core_set_pstate,
        },
};

static struct cpu_defaults byt_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
                .setpoint = 97,
                .p_gain_pct = 14,
                .d_gain_pct = 0,
                .i_gain_pct = 4,
        },
        .funcs = {
                .get_max = byt_get_max_pstate,
                .get_min = byt_get_min_pstate,
                .get_turbo = byt_get_max_pstate,
                .set = byt_set_pstate,
                .get_vid = byt_get_vid,
        },
};

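/*
 * Translate the global percent limits into a [min, max] pstate window.
 * Both percentages are applied to the highest currently reachable
 * pstate (turbo, or max_pstate when turbo is off); e.g. with a turbo
 * pstate of 32 and max_perf_pct = 50, the ceiling becomes pstate 16,
 * clamped back into the hardware's [min_pstate, turbo_pstate] range.
 */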
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
        int max_perf = cpu->pstate.turbo_pstate;
        int max_perf_adj;
        int min_perf;
        if (limits.no_turbo)
                max_perf = cpu->pstate.max_pstate;

        max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
        *max = clamp_t(int, max_perf_adj,
                        cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

        min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
        *min = clamp_t(int, min_perf,
                        cpu->pstate.min_pstate, max_perf);
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
        int max_perf, min_perf;

        intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

        pstate = clamp_t(int, pstate, min_perf, max_perf);

        if (pstate == cpu->pstate.current_pstate)
                return;

        trace_cpu_frequency(pstate * 100000, cpu->cpu);

        cpu->pstate.current_pstate = pstate;

        pstate_funcs.set(cpu, pstate);
}

static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
{
        int target;
        target = cpu->pstate.current_pstate + steps;

        intel_pstate_set_pstate(cpu, target);
}

static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
{
        int target;
        target = cpu->pstate.current_pstate - steps;
        intel_pstate_set_pstate(cpu, target);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
        sprintf(cpu->name, "Intel 2nd generation core");

        cpu->pstate.min_pstate = pstate_funcs.get_min();
        cpu->pstate.max_pstate = pstate_funcs.get_max();
        cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();

        if (pstate_funcs.get_vid)
                pstate_funcs.get_vid(cpu);

        /*
         * Go to max pstate so we don't slow up boot if we are built-in.
         * If we are a module we will take care of it during normal
         * operation.
         */
        intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
}

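/*
 * Derive load figures from one sample's counter deltas.  APERF/MPERF
 * gives the average ratio the core actually ran at while unhalted, as
 * a percentage of the guaranteed frequency, and MPERF/TSC gives the
 * fraction of wall time spent in C0.  The reported frequency is that
 * percentage applied to max_pstate's nominal rate in kHz
 * (pstate * 100000), and core_pct_busy folds the C0 residency into the
 * frequency percentage (the +1 presumably keeps a fully idle interval
 * from zeroing the result).
 */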
static inline void intel_pstate_calc_busy(struct cpudata *cpu,
                                        struct sample *sample)
{
        u64 core_pct;
        u64 c0_pct;

        core_pct = div64_u64(sample->aperf * 100, sample->mperf);

        c0_pct = div64_u64(sample->mperf * 100, sample->tsc);
        sample->freq = fp_toint(
                mul_fp(int_tofp(cpu->pstate.max_pstate),
                        int_tofp(core_pct * 1000)));

        sample->core_pct_busy = mul_fp(int_tofp(core_pct),
                                div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
}

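/*
 * Take one measurement: read APERF, MPERF and the TSC, store the
 * deltas since the previous read in the next slot of the small ring
 * buffer of SAMPLE_COUNT samples, and compute the busy figures for
 * that slot.
 */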
static inline void intel_pstate_sample(struct cpudata *cpu)
{
        u64 aperf, mperf;
        unsigned long long tsc;

        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
        tsc = native_read_tsc();

        cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
        cpu->samples[cpu->sample_ptr].aperf = aperf;
        cpu->samples[cpu->sample_ptr].mperf = mperf;
        cpu->samples[cpu->sample_ptr].tsc = tsc;
        cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
        cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
        cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;

        intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);

        cpu->prev_aperf = aperf;
        cpu->prev_mperf = mperf;
        cpu->prev_tsc = tsc;
}

static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
        int sample_time, delay;

        sample_time = pid_params.sample_rate_ms;
        delay = msecs_to_jiffies(sample_time);
        mod_timer_pinned(&cpu->timer, jiffies + delay);
}

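/*
 * Rescale the measured busy percentage (which is relative to the
 * guaranteed max frequency) to the pstate the core is currently
 * running at: busy * max_pstate / current_pstate.  A core that is
 * fully busy while running at half the max ratio measures ~50% but
 * scales back to ~100%, telling the PID it is saturated at the
 * current pstate.
 */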
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
        int32_t core_busy, max_pstate, current_pstate;

        core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
        max_pstate = int_tofp(cpu->pstate.max_pstate);
        current_pstate = int_tofp(cpu->pstate.current_pstate);
        return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
}

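/*
 * One control step: feed the scaled busy value to the PID.  The error
 * is setpoint - busy, so a negative output means the core is busier
 * than the setpoint and the pstate is stepped up; a positive output
 * steps it down.
 */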
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
        int32_t busy_scaled;
        struct _pid *pid;
        signed int ctl = 0;
        int steps;

        pid = &cpu->pid;
        busy_scaled = intel_pstate_get_scaled_busy(cpu);

        ctl = pid_calc(pid, busy_scaled);

        steps = abs(ctl);

        if (ctl < 0)
                intel_pstate_pstate_increase(cpu, steps);
        else
                intel_pstate_pstate_decrease(cpu, steps);
}

static void intel_pstate_timer_func(unsigned long __data)
{
        struct cpudata *cpu = (struct cpudata *) __data;
        struct sample *sample;

        intel_pstate_sample(cpu);

        sample = &cpu->samples[cpu->sample_ptr];

        intel_pstate_adjust_busy_pstate(cpu);

        trace_pstate_sample(fp_toint(sample->core_pct_busy),
                        fp_toint(intel_pstate_get_scaled_busy(cpu)),
                        cpu->pstate.current_pstate,
                        sample->mperf,
                        sample->aperf,
                        sample->freq);

        intel_pstate_set_sample_time(cpu);
}

#define ICPU(model, policy) \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
                        (unsigned long)&policy }

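/*
 * Family 6 model numbers for the supported parts: 0x2a/0x2d are Sandy
 * Bridge (client/EP), 0x37 is Bay Trail (Silvermont), 0x3a/0x3e are
 * Ivy Bridge (client/EP), and 0x3c/0x3f/0x45/0x46 are Haswell
 * variants.
 */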
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(0x2a, core_params),
        ICPU(0x2d, core_params),
        ICPU(0x37, byt_params),
        ICPU(0x3a, core_params),
        ICPU(0x3c, core_params),
        ICPU(0x3e, core_params),
        ICPU(0x3f, core_params),
        ICPU(0x45, core_params),
        ICPU(0x46, core_params),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static int intel_pstate_init_cpu(unsigned int cpunum)
{
        const struct x86_cpu_id *id;
        struct cpudata *cpu;

        id = x86_match_cpu(intel_pstate_cpu_ids);
        if (!id)
                return -ENODEV;

        all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
        if (!all_cpu_data[cpunum])
                return -ENOMEM;

        cpu = all_cpu_data[cpunum];

        intel_pstate_get_cpu_pstates(cpu);
        if (!cpu->pstate.current_pstate) {
                all_cpu_data[cpunum] = NULL;
                kfree(cpu);
                return -ENODATA;
        }

        cpu->cpu = cpunum;

        init_timer_deferrable(&cpu->timer);
        cpu->timer.function = intel_pstate_timer_func;
        cpu->timer.data = (unsigned long)cpu;
        cpu->timer.expires = jiffies + HZ/100;
        intel_pstate_busy_pid_reset(cpu);
        intel_pstate_sample(cpu);
        intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);

        add_timer_on(&cpu->timer, cpunum);

        pr_info("Intel pstate controlling: cpu %d\n", cpunum);

        return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
        struct sample *sample;
        struct cpudata *cpu;

        cpu = all_cpu_data[cpu_num];
        if (!cpu)
                return 0;
        sample = &cpu->samples[cpu->sample_ptr];
        return sample->freq;
}

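/*
 * This is a setpolicy (governor-less) cpufreq driver, so policy
 * changes arrive here instead of through a ->target() callback.
 * PERFORMANCE pins both limits to 100% and re-enables turbo;
 * POWERSAVE translates the policy's min/max frequencies into the
 * global percent limits that the per-CPU controllers consult.
 */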
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;

        cpu = all_cpu_data[policy->cpu];

        if (!policy->cpuinfo.max_freq)
                return -ENODEV;

        if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
                limits.min_perf_pct = 100;
                limits.min_perf = int_tofp(1);
                limits.max_perf_pct = 100;
                limits.max_perf = int_tofp(1);
                limits.no_turbo = 0;
                return 0;
        }
        limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
        limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0, 100);
        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

        limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
        limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);
        limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

        return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_cpu_limits(policy);

        if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
                (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
                return -EINVAL;

        return 0;
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
        int cpu = policy->cpu;

        del_timer(&all_cpu_data[cpu]->timer);
        kfree(all_cpu_data[cpu]);
        all_cpu_data[cpu] = NULL;
        return 0;
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;
        int rc;

        rc = intel_pstate_init_cpu(policy->cpu);
        if (rc)
                return rc;

        cpu = all_cpu_data[policy->cpu];

        if (!limits.no_turbo &&
                limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;

        policy->min = cpu->pstate.min_pstate * 100000;
        policy->max = cpu->pstate.turbo_pstate * 100000;

        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
        policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        cpumask_set_cpu(policy->cpu, policy->cpus);

        return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .verify         = intel_pstate_verify_policy,
        .setpolicy      = intel_pstate_set_policy,
        .get            = intel_pstate_get,
        .init           = intel_pstate_cpu_init,
        .exit           = intel_pstate_cpu_exit,
        .name           = "intel_pstate",
};

static int __initdata no_load;

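/*
 * Sanity-check the MSRs this driver depends on: the ratio getters must
 * return non-zero values and APERF/MPERF must actually advance between
 * two reads, otherwise the hardware (or a hypervisor) does not provide
 * what the control loop needs.
 */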
static int intel_pstate_msrs_not_valid(void)
{
        /* Check that all the msr's we are using are valid. */
        u64 aperf, mperf, tmp;

        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);

        if (!pstate_funcs.get_max() ||
                !pstate_funcs.get_min() ||
                !pstate_funcs.get_turbo())
                return -ENODEV;

        rdmsrl(MSR_IA32_APERF, tmp);
        if (!(tmp - aperf))
                return -ENODEV;

        rdmsrl(MSR_IA32_MPERF, tmp);
        if (!(tmp - mperf))
                return -ENODEV;

        return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
        pid_params.sample_rate_ms = policy->sample_rate_ms;
        pid_params.p_gain_pct = policy->p_gain_pct;
        pid_params.i_gain_pct = policy->i_gain_pct;
        pid_params.d_gain_pct = policy->d_gain_pct;
        pid_params.deadband = policy->deadband;
        pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
        pstate_funcs.get_max   = funcs->get_max;
        pstate_funcs.get_min   = funcs->get_min;
        pstate_funcs.get_turbo = funcs->get_turbo;
        pstate_funcs.set       = funcs->set;
        pstate_funcs.get_vid   = funcs->get_vid;
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
        int i;

        for_each_possible_cpu(i) {
                acpi_status status;
                union acpi_object *pss;
                struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
                struct acpi_processor *pr = per_cpu(processors, i);

                if (!pr)
                        continue;

                status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
                if (ACPI_FAILURE(status))
                        continue;

                pss = buffer.pointer;
                if (pss && pss->type == ACPI_TYPE_PACKAGE) {
                        kfree(pss);
                        return false;
                }

                kfree(pss);
        }

        return true;
}

struct hw_vendor_info {
        u16  valid;
        char oem_id[ACPI_OEM_ID_SIZE];
        char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
        {1, "HP    ", "ProLiant"},
        {0, "", ""},
};

static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
        struct acpi_table_header hdr;
        struct hw_vendor_info *v_info;

        if (acpi_disabled
            || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
                return false;

        for (v_info = vendor_info; v_info->valid; v_info++) {
                if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
                    && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
                    && intel_pstate_no_acpi_pss())
                        return true;
        }

        return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
#endif /* CONFIG_ACPI */

static int __init intel_pstate_init(void)
{
        int cpu, rc = 0;
        const struct x86_cpu_id *id;
        struct cpu_defaults *cpu_info;

        if (no_load)
                return -ENODEV;

        id = x86_match_cpu(intel_pstate_cpu_ids);
        if (!id)
                return -ENODEV;

        /*
         * The Intel pstate driver will be ignored if the platform
         * firmware has its own power management modes.
         */
        if (intel_pstate_platform_pwr_mgmt_exists())
                return -ENODEV;

        cpu_info = (struct cpu_defaults *)id->driver_data;

        copy_pid_params(&cpu_info->pid_policy);
        copy_cpu_funcs(&cpu_info->funcs);

        if (intel_pstate_msrs_not_valid())
                return -ENODEV;

        pr_info("Intel P-state driver initializing.\n");

        all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
        if (!all_cpu_data)
                return -ENOMEM;

        rc = cpufreq_register_driver(&intel_pstate_driver);
        if (rc)
                goto out;

        intel_pstate_debug_expose_params();
        intel_pstate_sysfs_expose_params();

        return rc;
out:
        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu]) {
                        del_timer_sync(&all_cpu_data[cpu]->timer);
                        kfree(all_cpu_data[cpu]);
                }
        }

        put_online_cpus();
        vfree(all_cpu_data);
        return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "disable"))
                no_load = 1;
        return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");