/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "fiji_ppsmc.h"
#include "fiji_smumgr.h"
#include "smu_ucode_xfer_vi.h"
#include "amdgpu_ucode.h"

#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"

#define FIJI_SMC_SIZE 0x20000
36 
/* Point the SMC indirect index register at @smc_address.
 *
 * The address must be dword-aligned and the full dword must lie within
 * @limit.  Auto-increment is disabled so exactly one dword is addressed.
 * Returns 0 on success, -EINVAL on a misaligned or out-of-range address.
 */
static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
{
	uint32_t cntl;

	if ((smc_address & 3) || (smc_address + 3) > limit)
		return -EINVAL;

	WREG32(mmSMC_IND_INDEX_0, smc_address);

	/* Single-dword access: make sure auto-increment is off. */
	cntl = RREG32(mmSMC_IND_ACCESS_CNTL);
	cntl = REG_SET_FIELD(cntl, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
	WREG32(mmSMC_IND_ACCESS_CNTL, cntl);

	return 0;
}
55 
/* Copy a byte buffer into SMC SRAM starting at @smc_start_address
 * (must be dword-aligned), never writing past @limit.
 *
 * Whole dwords are streamed big-endian (MSB first); a trailing partial
 * dword (1-3 bytes) is merged with the existing SRAM contents via a
 * read-modify-write cycle.  The shared index/data register pair is
 * protected by adev->smc_idx_lock for the whole transfer.
 * Returns 0 on success or -EINVAL on a bad address/range.
 */
static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	uint32_t addr;
	uint32_t data, orig_data;
	int result = 0;
	uint32_t extra_shift;
	unsigned long flags;

	if (smc_start_address & 3)
		return -EINVAL;

	if ((smc_start_address + byte_count) > limit)
		return -EINVAL;

	addr = smc_start_address;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	while (byte_count >= 4) {
		/* Bytes are written into the SMC address space with the MSB first */
		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];

		result = fiji_set_smc_sram_address(adev, addr, limit);

		if (result)
			goto out;

		WREG32(mmSMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {
		/* Now write odd bytes left, do a read modify write cycle */
		data = 0;

		result = fiji_set_smc_sram_address(adev, addr, limit);
		if (result)
			goto out;

		/* Fetch the dword we are about to partially overwrite. */
		orig_data = RREG32(mmSMC_IND_DATA_0);
		extra_shift = 8 * (4 - byte_count);

		/* Accumulate the remaining 1-3 bytes, MSB first. */
		while (byte_count > 0) {
			data = (data << 8) + *src++;
			byte_count--;
		}

		/* Left-align the new bytes; keep the untouched low bytes
		 * from the original dword.
		 */
		data <<= extra_shift;
		data |= (orig_data & ~((~0UL) << extra_shift));

		/* Re-select the address: the read above consumed it. */
		result = fiji_set_smc_sram_address(adev, addr, limit);
		if (result)
			goto out;

		WREG32(mmSMC_IND_DATA_0, data);
	}

out:
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return result;
}
119 
/* Program a jump instruction at SMC address 0x0 so execution starts
 * from the uploaded firmware entry point.
 *
 * Returns the result of the SRAM write; the original ignored the error
 * from fiji_copy_bytes_to_smc() and always returned 0.
 */
static int fiji_program_jump_on_start(struct amdgpu_device *adev)
{
	static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};

	/* limit is sizeof(data)+1 so the 4-byte payload passes the
	 * (addr + count) > limit bound check in fiji_copy_bytes_to_smc().
	 */
	return fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
}
127 
fiji_is_smc_ram_running(struct amdgpu_device * adev)128 static bool fiji_is_smc_ram_running(struct amdgpu_device *adev)
129 {
130 	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
131 	val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
132 
133 	return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
134 }
135 
wait_smu_response(struct amdgpu_device * adev)136 static int wait_smu_response(struct amdgpu_device *adev)
137 {
138 	int i;
139 	uint32_t val;
140 
141 	for (i = 0; i < adev->usec_timeout; i++) {
142 		val = RREG32(mmSMC_RESP_0);
143 		if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
144 			break;
145 		udelay(1);
146 	}
147 
148 	if (i == adev->usec_timeout)
149 		return -EINVAL;
150 
151 	return 0;
152 }
153 
fiji_send_msg_to_smc_offset(struct amdgpu_device * adev)154 static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev)
155 {
156 	if (wait_smu_response(adev)) {
157 		DRM_ERROR("Failed to send previous message\n");
158 		return -EINVAL;
159 	}
160 
161 	WREG32(mmSMC_MSG_ARG_0, 0x20000);
162 	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
163 
164 	if (wait_smu_response(adev)) {
165 		DRM_ERROR("Failed to send message\n");
166 		return -EINVAL;
167 	}
168 
169 	return 0;
170 }
171 
/* Send @msg to the SMU and wait for its acknowledgement.
 *
 * Returns 0 on success; -EINVAL if the SMC is not running, or if
 * either the previous or the current message times out.
 * (Also drops the stray double semicolon and non-K&R brace of the
 * original.)
 */
static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
{
	/* The SMU cannot accept messages while halted. */
	if (!fiji_is_smc_ram_running(adev))
		return -EINVAL;

	if (wait_smu_response(adev)) {
		DRM_ERROR("Failed to send previous message\n");
		return -EINVAL;
	}

	WREG32(mmSMC_MESSAGE_0, msg);

	if (wait_smu_response(adev)) {
		DRM_ERROR("Failed to send message\n");
		return -EINVAL;
	}

	return 0;
}
193 
/* Fire @msg at the SMU without waiting for its acknowledgement; only
 * the *previous* message's completion is awaited first.
 * Returns 0 if the message was issued, -EINVAL otherwise.
 */
static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
						PPSMC_Msg msg)
{
	int ret = 0;

	if (wait_smu_response(adev)) {
		DRM_ERROR("Failed to send previous message\n");
		ret = -EINVAL;
	} else {
		WREG32(mmSMC_MESSAGE_0, msg);
	}

	return ret;
}
206 
/* Write @parameter to the SMU argument register, then send @msg and
 * wait for acknowledgement.  Returns 0 on success or -EINVAL.
 */
static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
						PPSMC_Msg msg,
						uint32_t parameter)
{
	if (!fiji_is_smc_ram_running(adev))
		return -EINVAL;

	if (wait_smu_response(adev)) {
		DRM_ERROR("Failed to send previous message\n");
		return -EINVAL;
	}

	WREG32(mmSMC_MSG_ARG_0, parameter);

	/* NOTE(review): fiji_send_msg_to_smc() re-checks SMC state and
	 * waits for the previous response again; the duplicate wait is
	 * redundant but harmless.
	 */
	return fiji_send_msg_to_smc(adev, msg);
}
223 
/* Write @parameter to the SMU argument register and fire @msg without
 * waiting for its acknowledgement.  Returns 0 if issued, -EINVAL
 * if the previous message never completed.
 */
static int fiji_send_msg_to_smc_with_parameter_without_waiting(
					struct amdgpu_device *adev,
					PPSMC_Msg msg, uint32_t parameter)
{
	if (wait_smu_response(adev) != 0) {
		DRM_ERROR("Failed to send previous message\n");
		return -EINVAL;
	}

	/* Latch the argument first, then hand off the message. */
	WREG32(mmSMC_MSG_ARG_0, parameter);

	return fiji_send_msg_to_smc_without_waiting(adev, msg);
}
237 
#if 0 /* not used yet */
/* Wait for the SMC clock-enable (cken) bit to drop, i.e. for the SMC
 * to go inactive.  Returns 0 on success; -EINVAL if the SMC is not
 * running to begin with or the timeout expires.
 */
static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev)
{
	int i;
	uint32_t val;

	if (!fiji_is_smc_ram_running(adev))
		return -EINVAL;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
		if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -EINVAL;

	return 0;
}
#endif
260 
fiji_smu_upload_firmware_image(struct amdgpu_device * adev)261 static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev)
262 {
263 	const struct smc_firmware_header_v1_0 *hdr;
264 	uint32_t ucode_size;
265 	uint32_t ucode_start_address;
266 	const uint8_t *src;
267 	uint32_t val;
268 	uint32_t byte_count;
269 	uint32_t *data;
270 	unsigned long flags;
271 
272 	if (!adev->pm.fw)
273 		return -EINVAL;
274 
275 	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
276 	amdgpu_ucode_print_smc_hdr(&hdr->header);
277 
278 	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
279 	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
280 	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
281 	src = (const uint8_t *)
282 		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
283 
284 	if (ucode_size & 3) {
285 		DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
286 		return -EINVAL;
287 	}
288 
289 	if (ucode_size > FIJI_SMC_SIZE) {
290 		DRM_ERROR("SMC address is beyond the SMC RAM area\n");
291 		return -EINVAL;
292 	}
293 
294 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
295 	WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
296 
297 	val = RREG32(mmSMC_IND_ACCESS_CNTL);
298 	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
299 	WREG32(mmSMC_IND_ACCESS_CNTL, val);
300 
301 	byte_count = ucode_size;
302 	data = (uint32_t *)src;
303 	for (; byte_count >= 4; data++, byte_count -= 4)
304 		WREG32(mmSMC_IND_DATA_0, data[0]);
305 
306 	val = RREG32(mmSMC_IND_ACCESS_CNTL);
307 	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
308 	WREG32(mmSMC_IND_ACCESS_CNTL, val);
309 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
310 
311 	return 0;
312 }
313 
#if 0 /* not used yet */
/* Read one dword from SMC SRAM at @smc_address (bounded by @limit).
 * Returns 0 and stores the value in *@value, or -EINVAL on a bad
 * address.
 */
static int fiji_read_smc_sram_dword(struct amdgpu_device *adev,
				uint32_t smc_address,
				uint32_t *value,
				uint32_t limit)
{
	int result;
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	result = fiji_set_smc_sram_address(adev, smc_address, limit);
	if (result == 0)
		*value = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return result;
}

/* Write one dword to SMC SRAM at @smc_address (bounded by @limit).
 * Returns 0 on success or -EINVAL on a bad address.
 */
static int fiji_write_smc_sram_dword(struct amdgpu_device *adev,
				uint32_t smc_address,
				uint32_t value,
				uint32_t limit)
{
	int result;
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	result = fiji_set_smc_sram_address(adev, smc_address, limit);
	if (result == 0)
		WREG32(mmSMC_IND_DATA_0, value);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return result;
}

/* Halt the SMC: assert its reset line and gate its clock. */
static int fiji_smu_stop_smc(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);

	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);

	return 0;
}
#endif
360 
/* Map an SMU-side UCODE_ID_* firmware type to the driver-side
 * AMDGPU_UCODE_ID_*.  Unknown types log an error and map to
 * AMDGPU_UCODE_ID_MAXIMUM.
 */
static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type)
{
	switch (fw_type) {
	case UCODE_ID_SDMA0:
		return AMDGPU_UCODE_ID_SDMA0;
	case UCODE_ID_SDMA1:
		return AMDGPU_UCODE_ID_SDMA1;
	case UCODE_ID_CP_CE:
		return AMDGPU_UCODE_ID_CP_CE;
	case UCODE_ID_CP_PFP:
		return AMDGPU_UCODE_ID_CP_PFP;
	case UCODE_ID_CP_ME:
		return AMDGPU_UCODE_ID_CP_ME;
	case UCODE_ID_CP_MEC:
	case UCODE_ID_CP_MEC_JT1:
	case UCODE_ID_CP_MEC_JT2:
		/* MEC and both jump tables come from the MEC1 image */
		return AMDGPU_UCODE_ID_CP_MEC1;
	case UCODE_ID_RLC_G:
		return AMDGPU_UCODE_ID_RLC_G;
	default:
		DRM_ERROR("ucode type is out of range!\n");
		return AMDGPU_UCODE_ID_MAXIMUM;
	}
}
385 
/* Fill one SMU_Entry in the firmware TOC for @fw_type from the
 * corresponding loaded ucode image.  Returns 0 on success or -EINVAL
 * if that firmware has not been fetched.
 */
static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
						uint32_t fw_type,
						struct SMU_Entry *entry)
{
	enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type);
	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
	const struct gfx_firmware_header_v1_0 *header;
	uint64_t gpu_addr;
	uint32_t data_size;
	bool is_jump_table;

	if (ucode->fw == NULL)
		return -EINVAL;

	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	gpu_addr = ucode->mc_addr;
	data_size = le32_to_cpu(header->header.ucode_size_bytes);

	/* MEC jump tables are sub-regions of the MEC image. */
	is_jump_table = (fw_type == UCODE_ID_CP_MEC_JT1) ||
			(fw_type == UCODE_ID_CP_MEC_JT2);
	if (is_jump_table) {
		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
		data_size = le32_to_cpu(header->jt_size) << 2;
	}

	entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
	entry->id = (uint16_t)fw_type;
	entry->image_addr_high = upper_32_bits(gpu_addr);
	entry->image_addr_low = lower_32_bits(gpu_addr);
	entry->meta_data_addr_high = 0;
	entry->meta_data_addr_low = 0;
	entry->data_size_byte = data_size;
	entry->num_register_entries = 0;
	/* Only the RLC entry carries the flag bit. */
	entry->flags = (fw_type == UCODE_ID_RLC_G) ? 1 : 0;

	return 0;
}
424 
/* Build the firmware table of contents (TOC) in the driver-owned DRAM
 * buffer, publish its GPU address (and the SMU internal buffer address)
 * to the SMU, and request that the SMU load the listed ucodes.
 * Returns 0 on success or -EINVAL if any TOC entry cannot be populated
 * or the load request fails.
 */
static int fiji_smu_request_load_fw(struct amdgpu_device *adev)
{
	struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv;
	struct SMU_DRAMData_TOC *toc;
	uint32_t fw_to_load;

	/* Clear the load-status bits polled by
	 * fiji_smu_check_fw_load_finish().
	 */
	WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);

	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);

	toc = (struct SMU_DRAMData_TOC *)private->header;
	toc->num_entries = 0;
	toc->structure_version = 1;

	/* Nothing more to do when the SMU is not in charge of ucode load. */
	if (!adev->firmware.smu_load)
		return 0;

	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for RLC\n");
		return -EINVAL;
	}

	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for CE\n");
		return -EINVAL;
	}

	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for PFP\n");
		return -EINVAL;
	}

	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for ME\n");
		return -EINVAL;
	}

	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for MEC\n");
		return -EINVAL;
	}

	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
		return -EINVAL;
	}

	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
		return -EINVAL;
	}

	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for SDMA0\n");
		return -EINVAL;
	}

	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for SDMA1\n");
		return -EINVAL;
	}

	/* Tell the SMU where the TOC lives. */
	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);

	/* JT1/JT2 load as part of MEC; they have no separate mask bits. */
	fw_to_load = UCODE_ID_RLC_G_MASK |
			UCODE_ID_SDMA0_MASK |
			UCODE_ID_SDMA1_MASK |
			UCODE_ID_CP_CE_MASK |
			UCODE_ID_CP_ME_MASK |
			UCODE_ID_CP_PFP_MASK |
			UCODE_ID_CP_MEC_MASK;

	if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
		DRM_ERROR("Fail to request SMU load ucode\n");
		return -EINVAL;
	}

	return 0;
}
515 
/* Map a driver-side AMDGPU_UCODE_ID_* to the SMU load-status mask bit
 * reported in SOFT_REGISTERS_TABLE_28.  Unknown types log an error and
 * return 0 (no bit).
 */
static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
{
	switch (fw_type) {
	case AMDGPU_UCODE_ID_SDMA0:
		return UCODE_ID_SDMA0_MASK;
	case AMDGPU_UCODE_ID_SDMA1:
		return UCODE_ID_SDMA1_MASK;
	case AMDGPU_UCODE_ID_CP_CE:
		return UCODE_ID_CP_CE_MASK;
	case AMDGPU_UCODE_ID_CP_PFP:
		return UCODE_ID_CP_PFP_MASK;
	case AMDGPU_UCODE_ID_CP_ME:
		return UCODE_ID_CP_ME_MASK;
	case AMDGPU_UCODE_ID_CP_MEC1:
	case AMDGPU_UCODE_ID_CP_MEC2:
		/* both MEC engines share a single load-complete bit */
		return UCODE_ID_CP_MEC_MASK;
	case AMDGPU_UCODE_ID_RLC_G:
		return UCODE_ID_RLC_G_MASK;
	default:
		DRM_ERROR("ucode type is out of range!\n");
		return 0;
	}
}
540 
/* Poll SOFT_REGISTERS_TABLE_28 until the SMU reports the firmware of
 * @fw_type as loaded.  Returns 0 on success, -EINVAL on timeout.
 */
static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev,
					uint32_t fw_type)
{
	uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type);
	int i;

	for (i = 0; i < adev->usec_timeout; i++) {
		uint32_t loaded = RREG32_SMC(ixSOFT_REGISTERS_TABLE_28);

		if ((loaded & fw_mask) == fw_mask)
			return 0;
		udelay(1);
	}

	DRM_ERROR("check firmware loading failed\n");
	return -EINVAL;
}
560 
fiji_smu_start_in_protection_mode(struct amdgpu_device * adev)561 static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev)
562 {
563 	int result;
564 	uint32_t val;
565 	int i;
566 
567 	/* Assert reset */
568 	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
569 	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
570 	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
571 
572 	result = fiji_smu_upload_firmware_image(adev);
573 	if (result)
574 		return result;
575 
576 	/* Clear status */
577 	WREG32_SMC(ixSMU_STATUS, 0);
578 
579 	/* Enable clock */
580 	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
581 	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
582 	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
583 
584 	/* De-assert reset */
585 	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
586 	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
587 	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
588 
589 	/* Set SMU Auto Start */
590 	val = RREG32_SMC(ixSMU_INPUT_DATA);
591 	val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
592 	WREG32_SMC(ixSMU_INPUT_DATA, val);
593 
594 	/* Clear firmware interrupt enable flag */
595 	WREG32_SMC(ixFIRMWARE_FLAGS, 0);
596 
597 	for (i = 0; i < adev->usec_timeout; i++) {
598 		val = RREG32_SMC(ixRCU_UC_EVENTS);
599 		if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
600 			break;
601 		udelay(1);
602 	}
603 
604 	if (i == adev->usec_timeout) {
605 		DRM_ERROR("Interrupt is not enabled by firmware\n");
606 		return -EINVAL;
607 	}
608 
609 	/* Call Test SMU message with 0x20000 offset
610 	 * to trigger SMU start
611 	 */
612 	fiji_send_msg_to_smc_offset(adev);
613 	DRM_INFO("[FM]try triger smu start\n");
614 	/* Wait for done bit to be set */
615 	for (i = 0; i < adev->usec_timeout; i++) {
616 		val = RREG32_SMC(ixSMU_STATUS);
617 		if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
618 			break;
619 		udelay(1);
620 	}
621 
622 	if (i == adev->usec_timeout) {
623 		DRM_ERROR("Timeout for SMU start\n");
624 		return -EINVAL;
625 	}
626 
627 	/* Check pass/failed indicator */
628 	val = RREG32_SMC(ixSMU_STATUS);
629 	if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
630 		DRM_ERROR("SMU Firmware start failed\n");
631 		return -EINVAL;
632 	}
633 	DRM_INFO("[FM]smu started\n");
634 	/* Wait for firmware to initialize */
635 	for (i = 0; i < adev->usec_timeout; i++) {
636 		val = RREG32_SMC(ixFIRMWARE_FLAGS);
637 		if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
638 			break;
639 		udelay(1);
640 	}
641 
642 	if (i == adev->usec_timeout) {
643 		DRM_ERROR("SMU firmware initialization failed\n");
644 		return -EINVAL;
645 	}
646 	DRM_INFO("[FM]smu initialized\n");
647 
648 	return 0;
649 }
650 
/* Start the SMC in non-protection mode: wait for the boot ROM to
 * finish, hold the SMC in reset, upload the firmware, patch a jump to
 * the entry point at address 0, then release reset and wait for the
 * firmware to come up.  Returns 0 on success or a negative error.
 */
static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
{
	int i, result;
	uint32_t val;

	/* wait for smc boot up */
	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixRCU_UC_EVENTS);
		val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
		if (val)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout) {
		DRM_ERROR("SMC boot sequence is not completed\n");
		return -EINVAL;
	}

	/* Clear firmware interrupt enable flag */
	WREG32_SMC(ixFIRMWARE_FLAGS, 0);

	/* Assert reset */
	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);

	result = fiji_smu_upload_firmware_image(adev);
	if (result)
		return result;

	/* Set smc instruct start point at 0x0 */
	fiji_program_jump_on_start(adev);

	/* Enable clock */
	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);

	/* De-assert reset */
	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);

	/* Wait for firmware to initialize */
	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixFIRMWARE_FLAGS);
		if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout) {
		DRM_ERROR("Timeout for SMC firmware initialization\n");
		return -EINVAL;
	}

	return 0;
}
710 
fiji_smu_start(struct amdgpu_device * adev)711 int fiji_smu_start(struct amdgpu_device *adev)
712 {
713 	int result;
714 	uint32_t val;
715 
716 	if (!fiji_is_smc_ram_running(adev)) {
717 		val = RREG32_SMC(ixSMU_FIRMWARE);
718 		if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
719 			DRM_INFO("[FM]start smu in nonprotection mode\n");
720 			result = fiji_smu_start_in_non_protection_mode(adev);
721 			if (result)
722 				return result;
723 		} else {
724 			DRM_INFO("[FM]start smu in protection mode\n");
725 			result = fiji_smu_start_in_protection_mode(adev);
726 			if (result)
727 				return result;
728 		}
729 	}
730 
731 	return fiji_smu_request_load_fw(adev);
732 }
733 
/* SMU manager callbacks exposed to the common amdgpu SMU layer.  Only
 * the load-finish poll is implemented here; firmware load requests are
 * driven directly from fiji_smu_start().
 */
static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = {
	.check_fw_load_finish = fiji_smu_check_fw_load_finish,
	.request_smu_load_fw = NULL,
	.request_smu_specific_fw = NULL,
};
739 
fiji_smu_init(struct amdgpu_device * adev)740 int fiji_smu_init(struct amdgpu_device *adev)
741 {
742 	struct fiji_smu_private_data *private;
743 	uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
744 	uint32_t smu_internal_buffer_size = 200*4096;
745 	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
746 	struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
747 	uint64_t mc_addr;
748 	void *toc_buf_ptr;
749 	void *smu_buf_ptr;
750 	int ret;
751 
752 	private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL);
753 	if (NULL == private)
754 		return -ENOMEM;
755 
756 	/* allocate firmware buffers */
757 	if (adev->firmware.smu_load)
758 		amdgpu_ucode_init_bo(adev);
759 
760 	adev->smu.priv = private;
761 	adev->smu.fw_flags = 0;
762 
763 	/* Allocate FW image data structure and header buffer */
764 	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
765 			       true, AMDGPU_GEM_DOMAIN_VRAM,
766 			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
767 			       NULL, NULL, toc_buf);
768 	if (ret) {
769 		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
770 		return -ENOMEM;
771 	}
772 
773 	/* Allocate buffer for SMU internal buffer */
774 	ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
775 			       true, AMDGPU_GEM_DOMAIN_VRAM,
776 			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
777 			       NULL, NULL, smu_buf);
778 	if (ret) {
779 		DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
780 		return -ENOMEM;
781 	}
782 
783 	/* Retrieve GPU address for header buffer and internal buffer */
784 	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
785 	if (ret) {
786 		amdgpu_bo_unref(&adev->smu.toc_buf);
787 		DRM_ERROR("Failed to reserve the TOC buffer\n");
788 		return -EINVAL;
789 	}
790 
791 	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
792 	if (ret) {
793 		amdgpu_bo_unreserve(adev->smu.toc_buf);
794 		amdgpu_bo_unref(&adev->smu.toc_buf);
795 		DRM_ERROR("Failed to pin the TOC buffer\n");
796 		return -EINVAL;
797 	}
798 
799 	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
800 	if (ret) {
801 		amdgpu_bo_unreserve(adev->smu.toc_buf);
802 		amdgpu_bo_unref(&adev->smu.toc_buf);
803 		DRM_ERROR("Failed to map the TOC buffer\n");
804 		return -EINVAL;
805 	}
806 
807 	amdgpu_bo_unreserve(adev->smu.toc_buf);
808 	private->header_addr_low = lower_32_bits(mc_addr);
809 	private->header_addr_high = upper_32_bits(mc_addr);
810 	private->header = toc_buf_ptr;
811 
812 	ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
813 	if (ret) {
814 		amdgpu_bo_unref(&adev->smu.smu_buf);
815 		amdgpu_bo_unref(&adev->smu.toc_buf);
816 		DRM_ERROR("Failed to reserve the SMU internal buffer\n");
817 		return -EINVAL;
818 	}
819 
820 	ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
821 	if (ret) {
822 		amdgpu_bo_unreserve(adev->smu.smu_buf);
823 		amdgpu_bo_unref(&adev->smu.smu_buf);
824 		amdgpu_bo_unref(&adev->smu.toc_buf);
825 		DRM_ERROR("Failed to pin the SMU internal buffer\n");
826 		return -EINVAL;
827 	}
828 
829 	ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
830 	if (ret) {
831 		amdgpu_bo_unreserve(adev->smu.smu_buf);
832 		amdgpu_bo_unref(&adev->smu.smu_buf);
833 		amdgpu_bo_unref(&adev->smu.toc_buf);
834 		DRM_ERROR("Failed to map the SMU internal buffer\n");
835 		return -EINVAL;
836 	}
837 
838 	amdgpu_bo_unreserve(adev->smu.smu_buf);
839 	private->smu_buffer_addr_low = lower_32_bits(mc_addr);
840 	private->smu_buffer_addr_high = upper_32_bits(mc_addr);
841 
842 	adev->smu.smumgr_funcs = &fiji_smumgr_funcs;
843 
844 	return 0;
845 }
846 
fiji_smu_fini(struct amdgpu_device * adev)847 int fiji_smu_fini(struct amdgpu_device *adev)
848 {
849 	amdgpu_bo_unref(&adev->smu.toc_buf);
850 	amdgpu_bo_unref(&adev->smu.smu_buf);
851 	kfree(adev->smu.priv);
852 	adev->smu.priv = NULL;
853 	if (adev->firmware.fw_buf)
854 		amdgpu_ucode_fini_bo(adev);
855 
856 	return 0;
857 }
858