/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "cikd.h"
#include "kv_dpm.h"

#include "smu/smu_7_0_0_d.h"
#include "smu/smu_7_0_0_sh_mask.h"

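/*
 * Helpers for talking to the KV SMC: messages are posted through
 * SMC_MESSAGE_0 and acknowledged in SMC_RESP_0, message arguments go
 * through SMC_MSG_ARG_0, and SMC SRAM is reached via the
 * SMC_IND_INDEX_0/SMC_IND_DATA_0 indirect register pair.
 */
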
int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id)
{
	u32 i;
	u32 tmp = 0;

	/* post the message and poll for the SMC to acknowledge it */
	WREG32(mmSMC_MESSAGE_0, id & SMC_MESSAGE_0__SMC_MSG_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		if ((RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK) != 0)
			break;
		udelay(1);
	}
	tmp = RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK;

	/* a response of 1 means success; 0xFF and 0xFE are SMC error codes */
	if (tmp != 1) {
		if (tmp == 0xFF)
			return -EINVAL;
		else if (tmp == 0xFE)
			return -EINVAL;
	}

	return 0;
}

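/*
 * Ask the SMC which SCLK DPM levels are currently enabled; the mask
 * comes back through the SMC message argument register.
 */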
int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask)
{
	int ret;

	ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SCLKDPM_GetEnabledMask);

	if (ret == 0)
		*enable_mask = RREG32_SMC(ixSMC_SYSCON_MSG_ARG_0);

	return ret;
}

int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
					     PPSMC_Msg msg, u32 parameter)
{
	/* the argument register must be set up before the message is posted */
	WREG32(mmSMC_MSG_ARG_0, parameter);

	return amdgpu_kv_notify_message_to_smu(adev, msg);
}

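/*
 * Point the indirect SRAM window at a dword-aligned address within the
 * limit and keep auto-increment disabled so that each SMC_IND_DATA_0
 * access targets exactly one dword.
 */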
static int kv_set_smc_sram_address(struct amdgpu_device *adev,
				   u32 smc_address, u32 limit)
{
	if (smc_address & 3)
		return -EINVAL;
	if ((smc_address + 3) > limit)
		return -EINVAL;

	WREG32(mmSMC_IND_INDEX_0, smc_address);
	WREG32_P(mmSMC_IND_ACCESS_CNTL, 0,
		 ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);

	return 0;
}

int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
				  u32 *value, u32 limit)
{
	int ret;

	ret = kv_set_smc_sram_address(adev, smc_address, limit);
	if (ret)
		return ret;

	*value = RREG32(mmSMC_IND_DATA_0);
	return 0;
}

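/* Enable or disable dynamic power management in the SMC firmware. */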
int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Enable);
	else
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Disable);
}

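/* Toggle BAPM (bidirectional application power management) in the SMC. */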
int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableBAPM);
	else
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableBAPM);
}

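/*
 * Copy an arbitrary byte buffer into SMC SRAM.  The SRAM is dword
 * addressable and big-endian, so unaligned leading and trailing bytes
 * are handled with read-modify-write cycles while the aligned middle
 * of the buffer is copied one dword at a time.
 */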
int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev,
				u32 smc_start_address,
				const u8 *src, u32 byte_count, u32 limit)
{
	int ret;
	u32 data, original_data, addr, extra_shift, t_byte, count, mask;

	if ((smc_start_address + byte_count) > limit)
		return -EINVAL;

	addr = smc_start_address;
	t_byte = addr & 3;

	/* RMW for the initial bytes */
	if (t_byte != 0) {
		addr -= t_byte;

		ret = kv_set_smc_sram_address(adev, addr, limit);
		if (ret)
			return ret;

		original_data = RREG32(mmSMC_IND_DATA_0);

		data = 0;
		mask = 0;
		count = 4;
		while (count > 0) {
			if (t_byte > 0) {
				/* bytes before smc_start_address are preserved */
				mask = (mask << 8) | 0xff;
				t_byte--;
			} else if (byte_count > 0) {
				data = (data << 8) + *src++;
				byte_count--;
				mask <<= 8;
			} else {
				/* bytes past the end of src are preserved */
				data <<= 8;
				mask = (mask << 8) | 0xff;
			}
			count--;
		}

		data |= original_data & mask;

		ret = kv_set_smc_sram_address(adev, addr, limit);
		if (ret)
			return ret;

		WREG32(mmSMC_IND_DATA_0, data);

		addr += 4;
	}

	/* copy whole aligned dwords */
	while (byte_count >= 4) {
		/* SMC address space is BE */
		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];

		ret = kv_set_smc_sram_address(adev, addr, limit);
		if (ret)
			return ret;

		WREG32(mmSMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	/* RMW for the final bytes */
	if (byte_count > 0) {
		data = 0;

		ret = kv_set_smc_sram_address(adev, addr, limit);
		if (ret)
			return ret;

		original_data = RREG32(mmSMC_IND_DATA_0);

		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* SMC address space is BE */
			data = (data << 8) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		data |= (original_data & ~((~0UL) << extra_shift));

		ret = kv_set_smc_sram_address(adev, addr, limit);
		if (ret)
			return ret;

		WREG32(mmSMC_IND_DATA_0, data);
	}
	return 0;
}