This source file includes the following definitions.
- smu8_get_argument
- smu8_send_msg_to_smc_with_parameter
- smu8_send_msg_to_smc
- smu8_set_smc_sram_address
- smu8_write_smc_sram_dword
- smu8_check_fw_load_finish
- smu8_load_mec_firmware
- smu8_translate_firmware_enum_to_arg
- smu8_convert_fw_type_to_cgs
- smu8_smu_populate_single_scratch_task
- smu8_smu_populate_single_ucode_load_task
- smu8_smu_construct_toc_for_rlc_aram_save
- smu8_smu_initialize_toc_empty_job_list
- smu8_smu_construct_toc_for_vddgfx_enter
- smu8_smu_construct_toc_for_vddgfx_exit
- smu8_smu_construct_toc_for_power_profiling
- smu8_smu_construct_toc_for_bootup
- smu8_smu_construct_toc_for_clock_table
- smu8_smu_construct_toc
- smu8_smu_populate_firmware_entries
- smu8_smu_populate_single_scratch_entry
- smu8_download_pptable_settings
- smu8_upload_pptable_settings
- smu8_request_smu_load_fw
- smu8_start_smu
- smu8_smu_init
- smu8_smu_fini
- smu8_dpm_check_smu_features
- smu8_is_dpm_running
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24 #include <linux/delay.h>
25 #include <linux/gfp.h>
26 #include <linux/kernel.h>
27 #include <linux/ktime.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30
31 #include "cgs_common.h"
32 #include "smu/smu_8_0_d.h"
33 #include "smu/smu_8_0_sh_mask.h"
34 #include "smu8.h"
35 #include "smu8_fusion.h"
36 #include "smu8_smumgr.h"
37 #include "cz_ppsmc.h"
38 #include "smu_ucode_xfer_cz.h"
39 #include "gca/gfx_8_0_d.h"
40 #include "gca/gfx_8_0_sh_mask.h"
41 #include "smumgr.h"
42
/* Round a byte size up to the next multiple of 32 (SMU buffer alignment). */
#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)

/*
 * Firmwares the driver hands to the SMU at load time; iterated by
 * smu8_smu_populate_firmware_entries() in this order.
 */
static const enum smu8_scratch_entry firmware_list[] = {
	SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0,
	SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
	SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
};
55
56 static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr)
57 {
58 if (hwmgr == NULL || hwmgr->device == NULL)
59 return 0;
60
61 return cgs_read_register(hwmgr->device,
62 mmSMU_MP1_SRBM2P_ARG_0);
63 }
64
65
/*
 * Send one message plus argument to the SMU over the SRBM2P mailbox and
 * wait for the response register to go non-zero.
 *
 * Returns 0 on success, -EINVAL on bad handles, or the PHM wait result
 * when the mailbox is busy or the SMU does not respond in time.
 */
static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
					uint16_t msg, uint32_t parameter)
{
	int result = 0;
	ktime_t t_start;
	s64 elapsed_us;

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	/* Mailbox must be idle (RESP != 0 means previous msg completed). */
	result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
	if (result != 0) {
		/* Report which message the SMU is still busy with. */
		uint32_t val = cgs_read_register(hwmgr->device,
						 mmSMU_MP1_SRBM2P_MSG_0);
		pr_err("%s(0x%04x) aborted; SMU still servicing msg (0x%04x)\n",
			__func__, msg, val);
		return result;
	}
	t_start = ktime_get();

	/* Argument first, then clear RESP, then the message triggers the SMU. */
	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);

	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);

	/* Wait for the SMU to acknowledge by writing a non-zero response. */
	result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);

	elapsed_us = ktime_us_delta(ktime_get(), t_start);

	WARN(result, "%s(0x%04x, %#x) timed out after %lld us\n",
			__func__, msg, parameter, elapsed_us);

	return result;
}
103
/* Convenience wrapper: send a parameterless SMU message (argument = 0). */
static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	return smu8_send_msg_to_smc_with_parameter(hwmgr, msg, 0);
}
108
109 static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr,
110 uint32_t smc_address, uint32_t limit)
111 {
112 if (hwmgr == NULL || hwmgr->device == NULL)
113 return -EINVAL;
114
115 if (0 != (3 & smc_address)) {
116 pr_err("SMC address must be 4 byte aligned\n");
117 return -EINVAL;
118 }
119
120 if (limit <= (smc_address + 3)) {
121 pr_err("SMC address beyond the SMC RAM area\n");
122 return -EINVAL;
123 }
124
125 cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
126 SMN_MP1_SRAM_START_ADDR + smc_address);
127
128 return 0;
129 }
130
131 static int smu8_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
132 uint32_t smc_address, uint32_t value, uint32_t limit)
133 {
134 int result;
135
136 if (hwmgr == NULL || hwmgr->device == NULL)
137 return -EINVAL;
138
139 result = smu8_set_smc_sram_address(hwmgr, smc_address, limit);
140 if (!result)
141 cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
142
143 return result;
144 }
145
/*
 * Poll the SMU firmware header's UcodeLoadStatus field until every bit in
 * @firmware (a mask of UCODE_ID_*_MASK bits) is set, or the hwmgr timeout
 * (in microseconds) expires.
 *
 * Returns 0 when all requested firmwares report loaded, -EINVAL on bad
 * handles or timeout.
 */
static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr,
				   uint32_t firmware)
{
	int i;
	/* SRAM offset of UcodeLoadStatus inside the SMU firmware header. */
	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			 SMU8_FIRMWARE_HEADER_LOCATION +
			 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	/* Set the indirect index once; each data read re-reads the status. */
	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		/* Done when every bit of the requested mask is set. */
		if (firmware ==
			(cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
			break;
		udelay(1);
	}

	if (i >= hwmgr->usec_timeout) {
		pr_err("SMU check loaded firmware failed.\n");
		return -EINVAL;
	}

	return 0;
}
173
174 static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr)
175 {
176 uint32_t reg_data;
177 uint32_t tmp;
178 int ret = 0;
179 struct cgs_firmware_info info = {0};
180 struct smu8_smumgr *smu8_smu;
181
182 if (hwmgr == NULL || hwmgr->device == NULL)
183 return -EINVAL;
184
185 smu8_smu = hwmgr->smu_backend;
186 ret = cgs_get_firmware_info(hwmgr->device,
187 CGS_UCODE_ID_CP_MEC, &info);
188
189 if (ret)
190 return -EINVAL;
191
192
193 tmp = cgs_read_register(hwmgr->device,
194 mmCP_MEC_CNTL);
195 tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
196 tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
197 cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);
198
199 tmp = cgs_read_register(hwmgr->device,
200 mmCP_CPC_IC_BASE_CNTL);
201
202 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
203 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
204 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
205 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
206 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
207
208 reg_data = lower_32_bits(info.mc_addr) &
209 PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
210 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
211
212 reg_data = upper_32_bits(info.mc_addr) &
213 PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
214 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
215
216 return 0;
217 }
218
219 static uint8_t smu8_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
220 enum smu8_scratch_entry firmware_enum)
221 {
222 uint8_t ret = 0;
223
224 switch (firmware_enum) {
225 case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0:
226 ret = UCODE_ID_SDMA0;
227 break;
228 case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1:
229 if (hwmgr->chip_id == CHIP_STONEY)
230 ret = UCODE_ID_SDMA0;
231 else
232 ret = UCODE_ID_SDMA1;
233 break;
234 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE:
235 ret = UCODE_ID_CP_CE;
236 break;
237 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
238 ret = UCODE_ID_CP_PFP;
239 break;
240 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME:
241 ret = UCODE_ID_CP_ME;
242 break;
243 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
244 ret = UCODE_ID_CP_MEC_JT1;
245 break;
246 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
247 if (hwmgr->chip_id == CHIP_STONEY)
248 ret = UCODE_ID_CP_MEC_JT1;
249 else
250 ret = UCODE_ID_CP_MEC_JT2;
251 break;
252 case SMU8_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
253 ret = UCODE_ID_GMCON_RENG;
254 break;
255 case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G:
256 ret = UCODE_ID_RLC_G;
257 break;
258 case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
259 ret = UCODE_ID_RLC_SCRATCH;
260 break;
261 case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
262 ret = UCODE_ID_RLC_SRM_ARAM;
263 break;
264 case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
265 ret = UCODE_ID_RLC_SRM_DRAM;
266 break;
267 case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
268 ret = UCODE_ID_DMCU_ERAM;
269 break;
270 case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
271 ret = UCODE_ID_DMCU_IRAM;
272 break;
273 case SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
274 ret = TASK_ARG_INIT_MM_PWR_LOG;
275 break;
276 case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
277 case SMU8_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
278 case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
279 case SMU8_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
280 case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_START:
281 case SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
282 ret = TASK_ARG_REG_MMIO;
283 break;
284 case SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
285 ret = TASK_ARG_INIT_CLK_TABLE;
286 break;
287 }
288
289 return ret;
290 }
291
292 static enum cgs_ucode_id smu8_convert_fw_type_to_cgs(uint32_t fw_type)
293 {
294 enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
295
296 switch (fw_type) {
297 case UCODE_ID_SDMA0:
298 result = CGS_UCODE_ID_SDMA0;
299 break;
300 case UCODE_ID_SDMA1:
301 result = CGS_UCODE_ID_SDMA1;
302 break;
303 case UCODE_ID_CP_CE:
304 result = CGS_UCODE_ID_CP_CE;
305 break;
306 case UCODE_ID_CP_PFP:
307 result = CGS_UCODE_ID_CP_PFP;
308 break;
309 case UCODE_ID_CP_ME:
310 result = CGS_UCODE_ID_CP_ME;
311 break;
312 case UCODE_ID_CP_MEC_JT1:
313 result = CGS_UCODE_ID_CP_MEC_JT1;
314 break;
315 case UCODE_ID_CP_MEC_JT2:
316 result = CGS_UCODE_ID_CP_MEC_JT2;
317 break;
318 case UCODE_ID_RLC_G:
319 result = CGS_UCODE_ID_RLC_G;
320 break;
321 default:
322 break;
323 }
324
325 return result;
326 }
327
/*
 * Append one scratch-buffer task (save/load/initialize) to the TOC.
 *
 * Consumes the next TOC slot unconditionally (toc_entry_used_count is
 * advanced even on failure) and links it to the following slot unless
 * @is_last terminates the task list. The task's address and size come
 * from the scratch_buffer entry registered for @fw_enum.
 *
 * Returns 0 on success, -EINVAL if no scratch entry matches @fw_enum.
 */
static int smu8_smu_populate_single_scratch_task(
			struct pp_hwmgr *hwmgr,
			enum smu8_scratch_entry fw_enum,
			uint8_t type, bool is_last)
{
	uint8_t i;
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];

	task->type = type;
	task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
	/* After the ++ above, used_count is already the index of the next slot. */
	task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;

	/* Find the scratch buffer region registered for this firmware id. */
	for (i = 0; i < smu8_smu->scratch_buffer_length; i++)
		if (smu8_smu->scratch_buffer[i].firmware_ID == fw_enum)
			break;

	if (i >= smu8_smu->scratch_buffer_length) {
		pr_err("Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
	task->addr.high = upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
	task->size_bytes = smu8_smu->scratch_buffer[i].data_size;

	/* The IH-register blob carries an inline command header for the SMU. */
	if (SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
		struct smu8_ih_meta_data *pIHReg_restore =
		     (struct smu8_ih_meta_data *)smu8_smu->scratch_buffer[i].kaddr;
		pIHReg_restore->command =
			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
	}

	return 0;
}
364
/*
 * Append one TASK_TYPE_UCODE_LOAD task to the TOC for @fw_enum.
 *
 * Like the scratch variant, the TOC slot is consumed unconditionally and
 * chained to the next slot unless @is_last. Address/size come from the
 * driver_buffer entry filled by smu8_smu_populate_firmware_entries().
 *
 * Returns 0 on success, -EINVAL if @fw_enum has no driver_buffer entry.
 */
static int smu8_smu_populate_single_ucode_load_task(
			struct pp_hwmgr *hwmgr,
			enum smu8_scratch_entry fw_enum,
			bool is_last)
{
	uint8_t i;
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];

	task->type = TASK_TYPE_UCODE_LOAD;
	task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
	/* used_count was post-incremented, so it now names the next slot. */
	task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;

	/* Locate the firmware image registered for this id. */
	for (i = 0; i < smu8_smu->driver_buffer_length; i++)
		if (smu8_smu->driver_buffer[i].firmware_ID == fw_enum)
			break;

	if (i >= smu8_smu->driver_buffer_length) {
		pr_err("Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = lower_32_bits(smu8_smu->driver_buffer[i].mc_addr);
	task->addr.high = upper_32_bits(smu8_smu->driver_buffer[i].mc_addr);
	task->size_bytes = smu8_smu->driver_buffer[i].data_size;

	return 0;
}
394
/*
 * Record the TOC index of the RLC ARAM save job and append its single
 * save task. The index is later passed to PPSMC_MSG_ExecuteJob.
 */
static int smu8_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	smu8_smu->toc_entry_aram = smu8_smu->toc_entry_used_count;
	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
				TASK_TYPE_UCODE_SAVE, true);

	return 0;
}
406
407 static int smu8_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
408 {
409 int i;
410 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
411 struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
412
413 for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
414 toc->JobList[i] = (uint8_t)IGNORE_JOB;
415
416 return 0;
417 }
418
/*
 * Build the JOB_GFX_SAVE task chain executed when GFX power is removed:
 * save RLC scratch, then RLC SRM DRAM (last task terminates the chain).
 */
static int smu8_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_SAVE] = (uint8_t)smu8_smu->toc_entry_used_count;
	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
				TASK_TYPE_UCODE_SAVE, false);

	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
				TASK_TYPE_UCODE_SAVE, true);

	return 0;
}
435
436
/*
 * Build the JOB_GFX_RESTORE task chain executed when GFX power returns:
 * reload all CP/RLC ucode, then restore the saved RLC state. Stoney has
 * no second MEC jump table, so JT1 is loaded twice in its place.
 */
static int smu8_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)smu8_smu->toc_entry_used_count;

	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);

	if (hwmgr->chip_id == CHIP_STONEY)
		smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
	else
		smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);

	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);

	/* Restore the RLC state captured by the vddgfx-enter job. */
	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
				TASK_TYPE_UCODE_LOAD, false);

	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
				TASK_TYPE_UCODE_LOAD, false);

	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
				TASK_TYPE_UCODE_LOAD, true);

	return 0;
}
478
/*
 * Record the TOC index of the multimedia power-log initialization task;
 * executed explicitly via PPSMC_MSG_ExecuteJob during firmware load.
 */
static int smu8_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	smu8_smu->toc_entry_power_profiling_index = smu8_smu->toc_entry_used_count;

	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
				TASK_TYPE_INITIALIZE, true);
	return 0;
}
490
/*
 * Build the boot-time ucode load chain (SDMA, CP, RLC) and remember its
 * starting TOC index. Stoney lacks SDMA1 and MEC JT2, so those loads are
 * skipped there.
 */
static int smu8_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	smu8_smu->toc_entry_initialize_index = smu8_smu->toc_entry_used_count;

	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
	if (hwmgr->chip_id != CHIP_STONEY)
		smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
	if (hwmgr->chip_id != CHIP_STONEY)
		smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);

	return 0;
}
518
/*
 * Record the TOC index of the fusion clock-table transfer task, used by
 * the pptable download/upload paths via PPSMC_MSG_ExecuteJob.
 */
static int smu8_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	smu8_smu->toc_entry_clock_table = smu8_smu->toc_entry_used_count;

	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
				TASK_TYPE_INITIALIZE, true);

	return 0;
}
531
/*
 * Assemble the full TOC from scratch. The call order matters: each
 * builder consumes TOC slots sequentially via toc_entry_used_count and
 * records its own starting index for later ExecuteJob messages.
 */
static int smu8_smu_construct_toc(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	smu8_smu->toc_entry_used_count = 0;
	smu8_smu_initialize_toc_empty_job_list(hwmgr);
	smu8_smu_construct_toc_for_rlc_aram_save(hwmgr);
	smu8_smu_construct_toc_for_vddgfx_enter(hwmgr);
	smu8_smu_construct_toc_for_vddgfx_exit(hwmgr);
	smu8_smu_construct_toc_for_power_profiling(hwmgr);
	smu8_smu_construct_toc_for_bootup(hwmgr);
	smu8_smu_construct_toc_for_clock_table(hwmgr);

	return 0;
}
547
548 static int smu8_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
549 {
550 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
551 uint32_t firmware_type;
552 uint32_t i;
553 int ret;
554 enum cgs_ucode_id ucode_id;
555 struct cgs_firmware_info info = {0};
556
557 smu8_smu->driver_buffer_length = 0;
558
559 for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
560
561 firmware_type = smu8_translate_firmware_enum_to_arg(hwmgr,
562 firmware_list[i]);
563
564 ucode_id = smu8_convert_fw_type_to_cgs(firmware_type);
565
566 ret = cgs_get_firmware_info(hwmgr->device,
567 ucode_id, &info);
568
569 if (ret == 0) {
570 smu8_smu->driver_buffer[i].mc_addr = info.mc_addr;
571
572 smu8_smu->driver_buffer[i].data_size = info.image_size;
573
574 smu8_smu->driver_buffer[i].firmware_ID = firmware_list[i];
575 smu8_smu->driver_buffer_length++;
576 }
577 }
578
579 return 0;
580 }
581
582 static int smu8_smu_populate_single_scratch_entry(
583 struct pp_hwmgr *hwmgr,
584 enum smu8_scratch_entry scratch_type,
585 uint32_t ulsize_byte,
586 struct smu8_buffer_entry *entry)
587 {
588 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
589 uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
590
591 entry->data_size = ulsize_byte;
592 entry->kaddr = (char *) smu8_smu->smu_buffer.kaddr +
593 smu8_smu->smu_buffer_used_bytes;
594 entry->mc_addr = smu8_smu->smu_buffer.mc_addr + smu8_smu->smu_buffer_used_bytes;
595 entry->firmware_ID = scratch_type;
596
597 smu8_smu->smu_buffer_used_bytes += ulsize_aligned;
598
599 return 0;
600 }
601
602 static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
603 {
604 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
605 unsigned long i;
606
607 for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
608 if (smu8_smu->scratch_buffer[i].firmware_ID
609 == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
610 break;
611 }
612
613 *table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr;
614
615 smu8_send_msg_to_smc_with_parameter(hwmgr,
616 PPSMC_MSG_SetClkTableAddrHi,
617 upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
618
619 smu8_send_msg_to_smc_with_parameter(hwmgr,
620 PPSMC_MSG_SetClkTableAddrLo,
621 lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
622
623 smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
624 smu8_smu->toc_entry_clock_table);
625
626 smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
627
628 return 0;
629 }
630
631 static int smu8_upload_pptable_settings(struct pp_hwmgr *hwmgr)
632 {
633 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
634 unsigned long i;
635
636 for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
637 if (smu8_smu->scratch_buffer[i].firmware_ID
638 == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
639 break;
640 }
641
642 smu8_send_msg_to_smc_with_parameter(hwmgr,
643 PPSMC_MSG_SetClkTableAddrHi,
644 upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
645
646 smu8_send_msg_to_smc_with_parameter(hwmgr,
647 PPSMC_MSG_SetClkTableAddrLo,
648 lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
649
650 smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
651 smu8_smu->toc_entry_clock_table);
652
653 smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
654
655 return 0;
656 }
657
/*
 * Full firmware-load handshake with the SMU: gather firmware images,
 * build the TOC, hand its address to the SMU, run the ARAM-save,
 * power-profiling and bootup jobs, then wait for every expected ucode
 * to report loaded and finally bring up the MEC instruction cache.
 *
 * Returns 0 on success or a negative error from the load/verify steps.
 */
static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	uint32_t smc_address;
	uint32_t fw_to_check = 0;
	int ret;

	amdgpu_ucode_init_bo(hwmgr->adev);

	smu8_smu_populate_firmware_entries(hwmgr);

	smu8_smu_construct_toc(hwmgr);

	/* Clear UcodeLoadStatus in the SMU header before kicking the load. */
	smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
		offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	smu8_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);

	/* Tell the SMU where the TOC lives in GPU memory. */
	smu8_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DriverDramAddrHi,
					upper_32_bits(smu8_smu->toc_buffer.mc_addr));

	smu8_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DriverDramAddrLo,
					lower_32_bits(smu8_smu->toc_buffer.mc_addr));

	smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);

	/* Execute the pre-recorded jobs in TOC order. */
	smu8_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_ExecuteJob,
					smu8_smu->toc_entry_aram);
	smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
				smu8_smu->toc_entry_power_profiling_index);

	smu8_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_ExecuteJob,
					smu8_smu->toc_entry_initialize_index);

	fw_to_check = UCODE_ID_RLC_G_MASK |
			UCODE_ID_SDMA0_MASK |
			UCODE_ID_SDMA1_MASK |
			UCODE_ID_CP_CE_MASK |
			UCODE_ID_CP_ME_MASK |
			UCODE_ID_CP_PFP_MASK |
			UCODE_ID_CP_MEC_JT1_MASK |
			UCODE_ID_CP_MEC_JT2_MASK;

	/* Stoney has neither SDMA1 nor a second MEC jump table. */
	if (hwmgr->chip_id == CHIP_STONEY)
		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);

	ret = smu8_check_fw_load_finish(hwmgr, fw_to_check);
	if (ret) {
		pr_err("SMU firmware load failed\n");
		return ret;
	}

	ret = smu8_load_mec_firmware(hwmgr);
	if (ret) {
		pr_err("Mec Firmware load failed\n");
		return ret;
	}

	return 0;
}
722
/*
 * Read the SMU firmware version out of the firmware header in SMC SRAM,
 * publish it (hwmgr->smu_version and adev->pm.fw_version), then trigger
 * the full firmware load sequence.
 */
static int smu8_start_smu(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev;

	/* SRAM offset of the Version field in the SMU firmware header. */
	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			 SMU8_FIRMWARE_HEADER_LOCATION +
			 offsetof(struct SMU8_Firmware_Header, Version);

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	adev = hwmgr->adev;

	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
	hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
	pr_info("smu version %02d.%02d.%02d\n",
		((hwmgr->smu_version >> 16) & 0xFF),
		((hwmgr->smu_version >> 8) & 0xFF),
		(hwmgr->smu_version & 0xFF));
	/* Drop the low byte; pm.fw_version uses the major/minor pair. */
	adev->pm.fw_version = hwmgr->smu_version >> 8;

	return smu8_request_smu_load_fw(hwmgr);
}
746
747 static int smu8_smu_init(struct pp_hwmgr *hwmgr)
748 {
749 int ret = 0;
750 struct smu8_smumgr *smu8_smu;
751
752 smu8_smu = kzalloc(sizeof(struct smu8_smumgr), GFP_KERNEL);
753 if (smu8_smu == NULL)
754 return -ENOMEM;
755
756 hwmgr->smu_backend = smu8_smu;
757
758 smu8_smu->toc_buffer.data_size = 4096;
759 smu8_smu->smu_buffer.data_size =
760 ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
761 ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
762 ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
763 ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
764 ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
765
766 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
767 smu8_smu->toc_buffer.data_size,
768 PAGE_SIZE,
769 AMDGPU_GEM_DOMAIN_VRAM,
770 &smu8_smu->toc_buffer.handle,
771 &smu8_smu->toc_buffer.mc_addr,
772 &smu8_smu->toc_buffer.kaddr);
773 if (ret)
774 goto err2;
775
776 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
777 smu8_smu->smu_buffer.data_size,
778 PAGE_SIZE,
779 AMDGPU_GEM_DOMAIN_VRAM,
780 &smu8_smu->smu_buffer.handle,
781 &smu8_smu->smu_buffer.mc_addr,
782 &smu8_smu->smu_buffer.kaddr);
783 if (ret)
784 goto err1;
785
786 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
787 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
788 UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
789 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
790 pr_err("Error when Populate Firmware Entry.\n");
791 goto err0;
792 }
793
794 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
795 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
796 UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
797 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
798 pr_err("Error when Populate Firmware Entry.\n");
799 goto err0;
800 }
801 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
802 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
803 UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
804 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
805 pr_err("Error when Populate Firmware Entry.\n");
806 goto err0;
807 }
808
809 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
810 SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
811 sizeof(struct SMU8_MultimediaPowerLogData),
812 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
813 pr_err("Error when Populate Firmware Entry.\n");
814 goto err0;
815 }
816
817 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
818 SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
819 sizeof(struct SMU8_Fusion_ClkTable),
820 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
821 pr_err("Error when Populate Firmware Entry.\n");
822 goto err0;
823 }
824
825 return 0;
826
827 err0:
828 amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
829 &smu8_smu->smu_buffer.mc_addr,
830 &smu8_smu->smu_buffer.kaddr);
831 err1:
832 amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
833 &smu8_smu->toc_buffer.mc_addr,
834 &smu8_smu->toc_buffer.kaddr);
835 err2:
836 kfree(smu8_smu);
837 return -EINVAL;
838 }
839
840 static int smu8_smu_fini(struct pp_hwmgr *hwmgr)
841 {
842 struct smu8_smumgr *smu8_smu;
843
844 if (hwmgr == NULL || hwmgr->device == NULL)
845 return -EINVAL;
846
847 smu8_smu = hwmgr->smu_backend;
848 if (smu8_smu) {
849 amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
850 &smu8_smu->toc_buffer.mc_addr,
851 &smu8_smu->toc_buffer.kaddr);
852 amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
853 &smu8_smu->smu_buffer.mc_addr,
854 &smu8_smu->smu_buffer.kaddr);
855 kfree(smu8_smu);
856 }
857
858 return 0;
859 }
860
861 static bool smu8_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
862 unsigned long check_feature)
863 {
864 int result;
865 unsigned long features;
866
867 result = smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
868 if (result == 0) {
869 features = smum_get_argument(hwmgr);
870 if (features & check_feature)
871 return true;
872 }
873
874 return false;
875 }
876
877 static bool smu8_is_dpm_running(struct pp_hwmgr *hwmgr)
878 {
879 if (smu8_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
880 return true;
881 return false;
882 }
883
/* SMU manager callback table for SMU8 (Carrizo/Stoney) hardware. */
const struct pp_smumgr_func smu8_smu_funcs = {
	.name = "smu8_smu",
	.smu_init = smu8_smu_init,
	.smu_fini = smu8_smu_fini,
	.start_smu = smu8_start_smu,
	.check_fw_load_finish = smu8_check_fw_load_finish,
	.request_smu_load_fw = NULL,
	.request_smu_load_specific_fw = NULL,
	.get_argument = smu8_get_argument,
	.send_msg_to_smc = smu8_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = smu8_send_msg_to_smc_with_parameter,
	.download_pptable_settings = smu8_download_pptable_settings,
	.upload_pptable_settings = smu8_upload_pptable_settings,
	.is_dpm_running = smu8_is_dpm_running,
};
899