/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
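
/* Decode a power state's UI and internal classification flags and print
 * them for debugging.
 */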
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		printk("none\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		printk("battery\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		printk("balanced\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
		break;
	}
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		printk("none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			printk("boot ");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			printk("thermal ");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			printk("limited_pwr ");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			printk("rest ");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			printk("forced ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			printk("3d_perf ");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			printk("ovrdrv ");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			printk("uvd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			printk("3d_low ");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			printk("acpi ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			printk("uvd_hd2 ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			printk("uvd_hd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			printk("uvd_sd ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			printk("limited_pwr2 ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			printk("ulv ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			printk("uvd_mvc ");
	}
	printk("\n");
}
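
/* Print a power state's platform capability flags. */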
void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps: ");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		printk("single_disp ");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		printk("video ");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		printk("no_dc ");
	printk("\n");
}
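
/* Print whether a power state is the current (c), requested (r), or
 * boot (b) state.
 */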
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus: ");
	if (rps == adev->pm.dpm.current_ps)
		printk("c ");
	if (rps == adev->pm.dpm.requested_ps)
		printk("r ");
	if (rps == adev->pm.dpm.boot_ps)
		printk("b ");
	printk("\n");
}
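
/* Return the vblank time of the currently active display, in
 * microseconds, or 0xffffffff if no display is active.
 */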
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 line_time_us, vblank_lines;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
					amdgpu_crtc->hw_mode.clock;
				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
					amdgpu_crtc->hw_mode.crtc_vdisplay +
					(amdgpu_crtc->v_border * 2);
				vblank_time_us = vblank_lines * line_time_us;
				break;
			}
		}
	}

	return vblank_time_us;
}
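
/* Return the vertical refresh rate of the first active display, in Hz,
 * or 0 if no display is active.
 */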
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = amdgpu_crtc->hw_mode.vrefresh;
				break;
			}
		}
	}

	return vrefresh;
}
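
/* Factor the scaled interval i_c = i * r_c / 100 into a power-of-four
 * exponent (*u) and a mantissa (*p) such that i_c ~= *p << (2 * *u).
 */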
void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			      u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}
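
/* Compute the low and high transition thresholds (*tl, *th) around
 * target t with hysteresis h for a pair of low/high frequencies (fl, fh).
 * Returns -EINVAL for invalid frequency inputs.
 */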
int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}
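
/* A state counts as a UVD state if any of the UVD/HD/SD/MVC
 * classification bits are set.
 */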
bool amdgpu_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}
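
/* Return true if the thermal sensor is handled by the GPU's internal
 * controller rather than solely by an external chip.
 */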
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}
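
/* Overlay unions covering the PowerPlay and fan table revisions found
 * in the VBIOS.
 */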
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};
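
/* Copy an ATOM clock/voltage dependency table from the VBIOS into the
 * driver's representation.
 */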
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}
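
/* Read the platform capabilities and response times out of the
 * PowerPlay table header.
 */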
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
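
/* Parse the fan, clock dependency, CAC, and extended-header tables out
 * of the PowerPlay table and populate adev->pm.dpm accordingly.
 */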
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= AMDGPU_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
		    ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}
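
/* Free all dynamic state tables allocated by
 * amdgpu_parse_extended_power_table().
 */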
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};
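
/* Parse the thermal controller info from the PowerPlay table and, for
 * supported external chips, register the corresponding i2c device.
 */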
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}
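
/* Select the PCIe gen to program: an explicit ASIC setting wins,
 * otherwise use the fastest speed supported by both the system mask and
 * the default.
 */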
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}
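
/* Select the PCIe lane count to program: an explicit ASIC setting wins,
 * otherwise fall back to the default.
 */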
u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
				 u16 asic_lanes,
				 u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}
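
/* Encode a PCIe lane count (1/2/4/8/12/16) into the hardware's 3-bit
 * link-width representation (1/2/3/4/5/6).
 */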
u8 amdgpu_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}