This source file includes the following definitions:
- acr_r352_generate_flcn_bl_desc
- acr_r352_ls_ucode_img_load
- acr_r352_ls_img_fill_headers
- acr_r352_ls_fill_headers
- acr_r352_ls_write_wpr
- acr_r352_prepare_ls_blob
- acr_r352_fixup_hs_desc
- acr_r352_generate_hs_bl_desc
- acr_r352_prepare_hs_blob
- acr_r352_load_blobs
- acr_r352_load
- acr_r352_shutdown
- acr_r352_wpr_is_set
- acr_r352_bootstrap
- acr_r352_reset_nopmu
- acr_r352_reset
- acr_r352_fini
- acr_r352_dtor
- acr_r352_generate_pmu_bl_desc
- acr_r352_new_
- acr_r352_new
23 #include "acr_r352.h"
24 #include "hs_ucode.h"
25
26 #include <core/gpuobj.h>
27 #include <core/firmware.h>
28 #include <engine/falcon.h>
29 #include <subdev/pmu.h>
30 #include <core/msgqueue.h>
31 #include <engine/sec2.h>
32
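/*
 * ACR (r352 generation) support: builds the LS (low-secure) firmware blob for
 * all managed falcons and drives the NVIDIA-signed HS (high-secure) "load" and
 * "unload" blobs that set up and tear down the write-protected region (WPR)
 * from which the LS falcons are bootstrapped.
 */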
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
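/*
 * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor for a generic LS
 * falcon, filled by acr_r352_generate_flcn_bl_desc() and copied into the
 * falcon's DMEM so its bootloader knows where to DMA code and data from.
 * DMA bases are 256-byte-block addresses (byte address >> 8) split into a low
 * 32-bit half (*_dma_base) and a high half (*_dma_base1).
 */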
56 struct acr_r352_flcn_bl_desc {
57 u32 reserved[4];
58 u32 signature[4];
59 u32 ctx_dma;
60 u32 code_dma_base;
61 u32 non_sec_code_off;
62 u32 non_sec_code_size;
63 u32 sec_code_off;
64 u32 sec_code_size;
65 u32 code_entry_point;
66 u32 data_dma_base;
67 u32 data_size;
68 u32 code_dma_base1;
69 u32 data_dma_base1;
70 };
71
72
73
74
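/*
 * acr_r352_generate_flcn_bl_desc() - generate the bootloader descriptor for a
 * generic LS falcon image: compute the code/data DMA addresses of the image
 * inside WPR and fill the descriptor consumed by the LS bootloader.
 */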
75 static void
76 acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr,
77 const struct ls_ucode_img *img, u64 wpr_addr,
78 void *_desc)
79 {
80 struct acr_r352_flcn_bl_desc *desc = _desc;
81 const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
82 u64 base, addr_code, addr_data;
83
84 base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
85 addr_code = (base + pdesc->app_resident_code_offset) >> 8;
86 addr_data = (base + pdesc->app_resident_data_offset) >> 8;
87
88 desc->ctx_dma = FALCON_DMAIDX_UCODE;
89 desc->code_dma_base = lower_32_bits(addr_code);
90 desc->code_dma_base1 = upper_32_bits(addr_code);
91 desc->non_sec_code_off = pdesc->app_resident_code_offset;
92 desc->non_sec_code_size = pdesc->app_resident_code_size;
93 desc->code_entry_point = pdesc->app_imem_entry;
94 desc->data_dma_base = lower_32_bits(addr_data);
95 desc->data_dma_base1 = upper_32_bits(addr_data);
96 desc->data_size = pdesc->app_resident_data_size;
97 }
98
99
100
101
102
103
104
105
106
107
108
109
110
111
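/*
 * struct hsflcn_acr_desc - data interface of the HS ACR ucode, located at the
 * start of its data section. The driver patches it (see
 * acr_r352_fixup_hs_desc()) before running the blob, either describing the WPR
 * region to create or pointing at the LS ucode blob inside an already-set WPR.
 */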
112 struct hsflcn_acr_desc {
113 union {
114 u8 reserved_dmem[0x200];
115 u32 signatures[4];
116 } ucode_reserved_space;
117 u32 wpr_region_id;
118 u32 wpr_offset;
119 u32 mmu_mem_range;
120 #define FLCN_ACR_MAX_REGIONS 2
121 struct {
122 u32 no_regions;
123 struct {
124 u32 start_addr;
125 u32 end_addr;
126 u32 region_id;
127 u32 read_mask;
128 u32 write_mask;
129 u32 client_mask;
130 } region_props[FLCN_ACR_MAX_REGIONS];
131 } regions;
132 u32 ucode_blob_size;
133 u64 ucode_blob_base __aligned(8);
134 struct {
135 u32 vpr_enabled;
136 u32 vpr_start;
137 u32 vpr_end;
138 u32 hdcp_policies;
139 } vpr_desc;
140 };
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
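/*
 * struct acr_r352_lsf_lsb_header - LS falcon LSB header, written into WPR for
 * each managed falcon. Carries the image signature plus the offsets and sizes
 * (ucode, bootloader, application code/data, BL descriptor) the HS ACR ucode
 * needs in order to validate and bootstrap the falcon.
 */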
169 struct acr_r352_lsf_lsb_header {
170
171
172
173
174
175
176
177
178 struct {
179 u8 prd_keys[2][16];
180 u8 dbg_keys[2][16];
181 u32 b_prd_present;
182 u32 b_dbg_present;
183 u32 falcon_id;
184 } signature;
185 u32 ucode_off;
186 u32 ucode_size;
187 u32 data_size;
188 u32 bl_code_size;
189 u32 bl_imem_off;
190 u32 bl_data_off;
191 u32 bl_data_size;
192 u32 app_code_off;
193 u32 app_code_size;
194 u32 app_data_off;
195 u32 app_data_size;
196 u32 flags;
197 };
198
199
200
201
202
203
204
205
206
207
208
209
210
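/*
 * struct acr_r352_lsf_wpr_header - LS falcon WPR header. One entry per managed
 * falcon at the start of the WPR region; points to the falcon's LSB header and
 * tracks its bootstrap status. The array is terminated by an entry whose
 * falcon_id is NVKM_SECBOOT_FALCON_INVALID.
 */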
211 struct acr_r352_lsf_wpr_header {
212 u32 falcon_id;
213 u32 lsb_offset;
214 u32 bootstrap_owner;
215 u32 lazy_bootstrap;
216 u32 status;
217 #define LSF_IMAGE_STATUS_NONE 0
218 #define LSF_IMAGE_STATUS_COPY 1
219 #define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
220 #define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
221 #define LSF_IMAGE_STATUS_VALIDATION_DONE 4
222 #define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
223 #define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
224 };
225
226
227
228
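/*
 * struct ls_ucode_img_r352 - r352-specific wrapper around a LS ucode image,
 * bundling the WPR and LSB headers that will be written alongside it.
 */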
229 struct ls_ucode_img_r352 {
230 struct ls_ucode_img base;
231
232 const struct acr_r352_lsf_func *func;
233
234 struct acr_r352_lsf_wpr_header wpr_header;
235 struct acr_r352_lsf_lsb_header lsb_header;
236 };
237 #define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)
238
239
240
241
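/*
 * acr_r352_ls_ucode_img_load() - load and prepare the LS ucode image of a
 * single falcon. Loads the firmware through the per-falcon ls_func, checks
 * that the signature size matches the LSB header, and copies the signature
 * into it.
 */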
242 struct ls_ucode_img *
243 acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
244 const struct nvkm_secboot *sb,
245 enum nvkm_secboot_falcon falcon_id)
246 {
247 const struct nvkm_subdev *subdev = acr->base.subdev;
248 const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
249 struct ls_ucode_img_r352 *img;
250 int ret;
251
252 img = kzalloc(sizeof(*img), GFP_KERNEL);
253 if (!img)
254 return ERR_PTR(-ENOMEM);
255
256 img->base.falcon_id = falcon_id;
257
258 ret = func->load(sb, func->version_max, &img->base);
259 if (ret < 0) {
260 kfree(img->base.ucode_data);
261 kfree(img->base.sig);
262 kfree(img);
263 return ERR_PTR(ret);
264 }
265
266 img->func = func->version[ret];
267
268
269 if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
270 nvkm_error(subdev, "invalid signature size for %s falcon!\n",
271 nvkm_secboot_falcon_name[falcon_id]);
/* free what func->load() allocated before bailing out */
kfree(img->base.ucode_data);
kfree(img->base.sig);
kfree(img);
272 return ERR_PTR(-EINVAL);
273 }
274
275
276 memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
277
278
279 img->lsb_header.signature.falcon_id = falcon_id;
280
281 return &img->base;
282 }
283
284 #define LSF_LSB_HEADER_ALIGN 256
285 #define LSF_BL_DATA_ALIGN 256
286 #define LSF_BL_DATA_SIZE_ALIGN 256
287 #define LSF_BL_CODE_SIZE_ALIGN 256
288 #define LSF_UCODE_DATA_ALIGN 4096
289
290
291
292
293
294
295
296
297
298
299
300
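/*
 * acr_r352_ls_img_fill_headers() - fill the WPR and LSB headers of an image.
 * Lays the image out at @offset inside the WPR region (LSB header, ucode, then
 * BL descriptor data, each suitably aligned) and returns the offset at which
 * the next image can be placed.
 */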
301 static u32
302 acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
303 struct ls_ucode_img_r352 *img, u32 offset)
304 {
305 struct ls_ucode_img *_img = &img->base;
306 struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header;
307 struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header;
308 struct ls_ucode_img_desc *desc = &_img->ucode_desc;
309 const struct acr_r352_lsf_func *func = img->func;
310
311
312 whdr->falcon_id = _img->falcon_id;
313 whdr->bootstrap_owner = acr->base.boot_falcon;
314 whdr->status = LSF_IMAGE_STATUS_COPY;
315
316
317 if (acr->lazy_bootstrap & BIT(_img->falcon_id))
318 whdr->lazy_bootstrap = 1;
319
320
321 offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
322 whdr->lsb_offset = offset;
323 offset += sizeof(*lhdr);
324
325
326
327
328
329 offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
330 _img->ucode_off = lhdr->ucode_off = offset;
331 offset += _img->ucode_size;
332
333
334
335
336
337
338
339
340
341 lhdr->bl_code_size = ALIGN(desc->bootloader_size,
342 LSF_BL_CODE_SIZE_ALIGN);
343 lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
344 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
345 lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
346 lhdr->bl_code_size - lhdr->ucode_size;
347
348
349
350
351
352 lhdr->bl_imem_off = desc->bootloader_imem_offset;
353 lhdr->app_code_off = desc->app_start_offset +
354 desc->app_resident_code_offset;
355 lhdr->app_code_size = desc->app_resident_code_size;
356 lhdr->app_data_off = desc->app_start_offset +
357 desc->app_resident_data_offset;
358 lhdr->app_data_size = desc->app_resident_data_size;
359
360 lhdr->flags = func->lhdr_flags;
361 if (_img->falcon_id == acr->base.boot_falcon)
362 lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
363
364
365 lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
366
367
368
369
370 offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
371 lhdr->bl_data_off = offset;
372 offset += lhdr->bl_data_size;
373
374 return offset;
375 }
376
377
378
379
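/*
 * acr_r352_ls_fill_headers() - fill the headers of all managed LS images.
 * Reserves room for the WPR header array (including its terminator), lets each
 * image compute its own layout, and returns the total size required for the
 * LS blob.
 */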
380 int
381 acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
382 {
383 struct ls_ucode_img_r352 *img;
384 struct list_head *l;
385 u32 count = 0;
386 u32 offset;
387
388
389 list_for_each(l, imgs)
390 count++;
391
392
393
394
395
396
397
398 offset = sizeof(img->wpr_header) * (count + 1);
399
400
401
402
403
404 list_for_each_entry(img, imgs, base.node) {
405 offset = acr_r352_ls_img_fill_headers(acr, img, offset);
406 }
407
408 return offset;
409 }
410
411
412
413
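/*
 * acr_r352_ls_write_wpr() - write the WPR and LSB headers, bootloader
 * descriptors and ucode of all managed falcons into the LS blob, terminating
 * the WPR header array with NVKM_SECBOOT_FALCON_INVALID.
 */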
414 int
415 acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
416 struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
417 {
418 struct ls_ucode_img *_img;
419 u32 pos = 0;
420 u32 max_desc_size = 0;
421 u8 *gdesc;
422
423
424 list_for_each_entry(_img, imgs, node) {
425 struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
426 const struct acr_r352_lsf_func *ls_func = img->func;
427
428 max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
429 }
430
431 gdesc = kmalloc(max_desc_size, GFP_KERNEL);
432 if (!gdesc)
433 return -ENOMEM;
434
435 nvkm_kmap(wpr_blob);
436
437 list_for_each_entry(_img, imgs, node) {
438 struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
439 const struct acr_r352_lsf_func *ls_func = img->func;
440
441 nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
442 sizeof(img->wpr_header));
443
444 nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
445 &img->lsb_header, sizeof(img->lsb_header));
446
447
448 memset(gdesc, 0, ls_func->bl_desc_size);
449 ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);
450
451 nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
452 gdesc, ls_func->bl_desc_size);
453
454
455 nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
456 _img->ucode_data, _img->ucode_size);
457
458 pos += sizeof(img->wpr_header);
459 }
460
461 nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
462
463 nvkm_done(wpr_blob);
464
465 kfree(gdesc);
466
467 return 0;
468 }
469
470
471 #define WPR_ALIGNMENT 0x40000
472
473
474
475
476
477
478
479
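/*
 * acr_r352_prepare_ls_blob() - create the LS blob. Loads the ucode of every
 * managed falcon (skipping optional ones that fail to load), marks non-boot
 * falcons for lazy bootstrap when the boot falcon is itself managed, computes
 * the WPR layout and writes everything into a newly allocated GPU object.
 * When no firmware-set WPR exists, the blob itself (doubled if a shadow copy
 * is required) serves as the WPR region.
 */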
480 static int
481 acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb)
482 {
483 const struct nvkm_subdev *subdev = acr->base.subdev;
484 struct list_head imgs;
485 struct ls_ucode_img *img, *t;
486 unsigned long managed_falcons = acr->base.managed_falcons;
487 u64 wpr_addr = sb->wpr_addr;
488 u32 wpr_size = sb->wpr_size;
489 int managed_count = 0;
490 u32 image_wpr_size, ls_blob_size;
491 int falcon_id;
492 int ret;
493
494 INIT_LIST_HEAD(&imgs);
495
496
497 for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
498 struct ls_ucode_img *img;
499
500 img = acr->func->ls_ucode_img_load(acr, sb, falcon_id);
501 if (IS_ERR(img)) {
502 if (acr->base.optional_falcons & BIT(falcon_id)) {
503 managed_falcons &= ~BIT(falcon_id);
504 nvkm_info(subdev, "skipping %s falcon...\n",
505 nvkm_secboot_falcon_name[falcon_id]);
506 continue;
507 }
508 ret = PTR_ERR(img);
509 goto cleanup;
510 }
511
512 list_add_tail(&img->node, &imgs);
513 managed_count++;
514 }
515
516
517 acr->base.managed_falcons = managed_falcons;
518
519
520
521
522
523 if (acr->func->ls_func[acr->base.boot_falcon] &&
524 (managed_falcons & BIT(acr->base.boot_falcon))) {
525 for_each_set_bit(falcon_id, &managed_falcons,
526 NVKM_SECBOOT_FALCON_END) {
527 if (falcon_id == acr->base.boot_falcon)
528 continue;
529
530 acr->lazy_bootstrap |= BIT(falcon_id);
531 }
532 }
533
534
535
536
537
538 image_wpr_size = acr->func->ls_fill_headers(acr, &imgs);
539 image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT);
540
541 ls_blob_size = image_wpr_size;
542
543
544
545
546
547 if (wpr_size == 0 && acr->func->shadow_blob)
548 ls_blob_size *= 2;
549
550
551 ret = nvkm_gpuobj_new(subdev->device, ls_blob_size, WPR_ALIGNMENT,
552 false, NULL, &acr->ls_blob);
553 if (ret)
554 goto cleanup;
555
556 nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n",
557 managed_count, image_wpr_size);
558
559
560 if (wpr_size == 0) {
561 wpr_addr = acr->ls_blob->addr;
562 if (acr->func->shadow_blob)
563 wpr_addr += acr->ls_blob->size / 2;
564
565 wpr_size = image_wpr_size;
566
567
568
569
570 } else if (image_wpr_size > wpr_size) {
571 nvkm_error(subdev, "WPR region too small for FW blob!\n");
572 nvkm_error(subdev, "required: %dB\n", image_wpr_size);
573 nvkm_error(subdev, "available: %dB\n", wpr_size);
574 ret = -ENOSPC;
575 goto cleanup;
576 }
577
578
579 ret = acr->func->ls_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr);
580 if (ret)
581 nvkm_gpuobj_del(&acr->ls_blob);
582
583 cleanup:
584 list_for_each_entry_safe(img, t, &imgs, node) {
585 kfree(img->ucode_data);
586 kfree(img->sig);
587 kfree(img);
588 }
589
590 return ret;
591 }
592
593
594
595
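/*
 * acr_r352_fixup_hs_desc() - patch the HS descriptor before the HS blob runs.
 * If no firmware-set WPR exists, describe the WPR region covering the LS blob;
 * otherwise just pass the LS blob address and size.
 */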
596 void
597 acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
598 void *_desc)
599 {
600 struct hsflcn_acr_desc *desc = _desc;
601 struct nvkm_gpuobj *ls_blob = acr->ls_blob;
602
603
604 if (sb->wpr_size == 0) {
605 u64 wpr_start = ls_blob->addr;
606 u64 wpr_end = wpr_start + ls_blob->size;
607
608 desc->wpr_region_id = 1;
609 desc->regions.no_regions = 2;
610 desc->regions.region_props[0].start_addr = wpr_start >> 8;
611 desc->regions.region_props[0].end_addr = wpr_end >> 8;
612 desc->regions.region_props[0].region_id = 1;
613 desc->regions.region_props[0].read_mask = 0xf;
614 desc->regions.region_props[0].write_mask = 0xc;
615 desc->regions.region_props[0].client_mask = 0x2;
616 } else {
617 desc->ucode_blob_base = ls_blob->addr;
618 desc->ucode_blob_size = ls_blob->size;
619 }
620 }
621
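/*
 * acr_r352_generate_hs_bl_desc() - generate the bootloader descriptor used by
 * the HS bootloader to start the HS blob located at @offset.
 */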
622 static void
623 acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
624 u64 offset)
625 {
626 struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc;
627 u64 addr_code, addr_data;
628
629 addr_code = offset >> 8;
630 addr_data = (offset + hdr->data_dma_base) >> 8;
631
632 bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
633 bl_desc->code_dma_base = lower_32_bits(addr_code);
634 bl_desc->non_sec_code_off = hdr->non_sec_code_off;
635 bl_desc->non_sec_code_size = hdr->non_sec_code_size;
636 bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
637 bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
638 bl_desc->code_entry_point = 0;
639 bl_desc->data_dma_base = lower_32_bits(addr_data);
640 bl_desc->data_size = hdr->data_size;
641 }
642
643
644
645
646
647
648
649
650
651
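/*
 * acr_r352_prepare_hs_blob() - load a HS blob from firmware file @fw, patch
 * its ACR descriptor if @patch is set, copy its load header, and upload the
 * ucode into a newly allocated GPU object.
 */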
652 static int
653 acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
654 const char *fw, struct nvkm_gpuobj **blob,
655 struct hsf_load_header *load_header, bool patch)
656 {
657 struct nvkm_subdev *subdev = &sb->subdev;
658 void *acr_image;
659 struct fw_bin_header *hsbin_hdr;
660 struct hsf_fw_header *fw_hdr;
661 struct hsf_load_header *load_hdr;
662 void *acr_data;
663 int ret;
664
665 acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, fw);
666 if (IS_ERR(acr_image))
667 return PTR_ERR(acr_image);
668
669 hsbin_hdr = acr_image;
670 fw_hdr = acr_image + hsbin_hdr->header_offset;
671 load_hdr = acr_image + fw_hdr->hdr_offset;
672 acr_data = acr_image + hsbin_hdr->data_offset;
673
674
675 if (patch) {
676 struct hsflcn_acr_desc *desc;
677
678 desc = acr_data + load_hdr->data_dma_base;
679 acr->func->fixup_hs_desc(acr, sb, desc);
680 }
681
682 if (load_hdr->num_apps > ACR_R352_MAX_APPS) {
683 nvkm_error(subdev, "more apps (%d) than supported (%d)!",
684 load_hdr->num_apps, ACR_R352_MAX_APPS);
685 ret = -EINVAL;
686 goto cleanup;
687 }
688 memcpy(load_header, load_hdr, sizeof(*load_header) +
689 (sizeof(load_hdr->apps[0]) * 2 * load_hdr->num_apps));
690
691
692 ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
693 0x1000, false, NULL, blob);
694 if (ret)
695 goto cleanup;
696
697 nvkm_kmap(*blob);
698 nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size);
699 nvkm_done(*blob);
700
701 cleanup:
702 kfree(acr_image);
703
704 return ret;
705 }
706
707
708
709
710
711
712
713
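/*
 * acr_r352_load_blobs() - load everything the ACR needs: the LS blob, the HS
 * load blob, the HS unload blob (only when we manage the WPR region ourselves)
 * and the HS bootloader(s). Only done once; subsequent calls are no-ops.
 */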
714 int
715 acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
716 {
717 struct nvkm_subdev *subdev = &sb->subdev;
718 int ret;
719
720
721 if (acr->firmware_ok)
722 return 0;
723
724
725 ret = acr_r352_prepare_ls_blob(acr, sb);
726 if (ret)
727 return ret;
728
729
730 if (!acr->load_blob) {
731 ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load",
732 &acr->load_blob,
733 &acr->load_bl_header, true);
734 if (ret)
735 return ret;
736 }
737
738
739 if (sb->wpr_size == 0) {
740 ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload",
741 &acr->unload_blob,
742 &acr->unload_bl_header, false);
743 if (ret)
744 return ret;
745 }
746
747
748 if (!acr->hsbl_blob) {
749 acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
750 if (IS_ERR(acr->hsbl_blob)) {
751 ret = PTR_ERR(acr->hsbl_blob);
752 acr->hsbl_blob = NULL;
753 return ret;
754 }
755
756 if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU) {
757 acr->hsbl_unload_blob = nvkm_acr_load_firmware(subdev,
758 "acr/unload_bl", 0);
759 if (IS_ERR(acr->hsbl_unload_blob)) {
760 ret = PTR_ERR(acr->hsbl_unload_blob);
761 acr->hsbl_unload_blob = NULL;
762 return ret;
763 }
764 } else {
765 acr->hsbl_unload_blob = acr->hsbl_blob;
766 }
767 }
768
769 acr->firmware_ok = true;
770 nvkm_debug(&sb->subdev, "LS blob successfully created\n");
771
772 return 0;
773 }
774
775
776
777
778
779
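/*
 * acr_r352_load() - prepare @falcon to run a HS blob. Loads the HS bootloader
 * code and data into the falcon, writes the generated HS BL descriptor into
 * DMEM at the offset the bootloader expects, and returns the start address
 * (start_tag << 8) from which the falcon should be launched.
 */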
780 static int
781 acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
782 struct nvkm_gpuobj *blob, u64 offset)
783 {
784 struct acr_r352 *acr = acr_r352(_acr);
785 const u32 bl_desc_size = acr->func->hs_bl_desc_size;
786 const struct hsf_load_header *load_hdr;
787 struct fw_bin_header *bl_hdr;
788 struct fw_bl_desc *hsbl_desc;
789 void *bl, *blob_data, *hsbl_code, *hsbl_data;
790 u32 code_size;
791 u8 *bl_desc;
792
793 bl_desc = kzalloc(bl_desc_size, GFP_KERNEL);
794 if (!bl_desc)
795 return -ENOMEM;
796
797
798 if (blob == acr->load_blob) {
799 load_hdr = &acr->load_bl_header;
800 bl = acr->hsbl_blob;
801 } else if (blob == acr->unload_blob) {
802 load_hdr = &acr->unload_bl_header;
803 bl = acr->hsbl_unload_blob;
804 } else {
805 nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
806 kfree(bl_desc);
807 return -EINVAL;
808 }
809
810 bl_hdr = bl;
811 hsbl_desc = bl + bl_hdr->header_offset;
812 blob_data = bl + bl_hdr->data_offset;
813 hsbl_code = blob_data + hsbl_desc->code_off;
814 hsbl_data = blob_data + hsbl_desc->data_off;
815 code_size = ALIGN(hsbl_desc->code_size, 256);
816
817
818
819
820 nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0);
821
822
823 nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size,
824 code_size, hsbl_desc->start_tag, 0, false);
825
826
827 acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset);
828
829
830
831
832 nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
833 bl_desc_size, 0);
834
835 kfree(bl_desc);
836 return hsbl_desc->start_tag << 8;
837 }
838
839 static int
840 acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
841 {
842 struct nvkm_subdev *subdev = &sb->subdev;
843 int i;
844
845
846 if (acr->unload_blob && sb->wpr_set) {
847 int ret;
848
849 nvkm_debug(subdev, "running HS unload blob\n");
850 ret = sb->func->run_blob(sb, acr->unload_blob, sb->halt_falcon);
851 if (ret < 0)
852 return ret;
853
854
855
856
857 if (ret && ret != 0x1d) {
858 nvkm_error(subdev, "HS unload failed, ret 0x%08x\n", ret);
859 return -EINVAL;
860 }
861 nvkm_debug(subdev, "HS unload blob completed\n");
862 }
863
864 for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
865 acr->falcon_state[i] = NON_SECURE;
866
867 sb->wpr_set = false;
868
869 return 0;
870 }
871
872
873
874
875
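/*
 * acr_r352_wpr_is_set() - check whether a WPR region matching our expectations
 * (either the firmware-set region or the LS blob itself) is currently
 * programmed, by reading the WPR bounds back from the WPR info registers.
 */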
876 static bool
877 acr_r352_wpr_is_set(const struct acr_r352 *acr, const struct nvkm_secboot *sb)
878 {
879 const struct nvkm_subdev *subdev = &sb->subdev;
880 const struct nvkm_device *device = subdev->device;
881 u64 wpr_lo, wpr_hi;
882 u64 wpr_range_lo, wpr_range_hi;
883
884 nvkm_wr32(device, 0x100cd4, 0x2);
885 wpr_lo = (nvkm_rd32(device, 0x100cd4) & ~0xff);
886 wpr_lo <<= 8;
887 nvkm_wr32(device, 0x100cd4, 0x3);
888 wpr_hi = (nvkm_rd32(device, 0x100cd4) & ~0xff);
889 wpr_hi <<= 8;
890
891 if (sb->wpr_size != 0) {
892 wpr_range_lo = sb->wpr_addr;
893 wpr_range_hi = wpr_range_lo + sb->wpr_size;
894 } else {
895 wpr_range_lo = acr->ls_blob->addr;
896 wpr_range_hi = wpr_range_lo + acr->ls_blob->size;
897 }
898
899 return (wpr_lo >= wpr_range_lo && wpr_lo < wpr_range_hi &&
900 wpr_hi > wpr_range_lo && wpr_hi <= wpr_range_hi);
901 }
902
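/*
 * acr_r352_bootstrap() - run the HS load blob to set up the WPR region and
 * bootstrap all managed LS falcons, then run the per-falcon post_run hooks.
 * Does nothing if the WPR region is already set.
 */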
903 static int
904 acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
905 {
906 const struct nvkm_subdev *subdev = &sb->subdev;
907 unsigned long managed_falcons = acr->base.managed_falcons;
908 int falcon_id;
909 int ret;
910
911 if (sb->wpr_set)
912 return 0;
913
914
915 ret = acr_r352_load_blobs(acr, sb);
916 if (ret)
917 return ret;
918
919 nvkm_debug(subdev, "running HS load blob\n");
920 ret = sb->func->run_blob(sb, acr->load_blob, sb->boot_falcon);
921
922 nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10);
923 sb->wpr_set = acr_r352_wpr_is_set(acr, sb);
924 if (ret < 0) {
925 return ret;
926 } else if (ret > 0) {
927 nvkm_error(subdev, "HS load failed, ret 0x%08x\n", ret);
928 return -EINVAL;
929 }
930 nvkm_debug(subdev, "HS load blob completed\n");
931
932 if (!sb->wpr_set) {
933 nvkm_error(subdev, "ACR blob completed but WPR not set!\n");
934 return -EINVAL;
935 }
936
937
938 for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
939 const struct acr_r352_ls_func *func =
940 acr->func->ls_func[falcon_id];
941
942 if (func->post_run) {
943 ret = func->post_run(&acr->base, sb);
944 if (ret)
945 return ret;
946 }
947 }
948
949 return 0;
950 }
951
952
953
954
955
956
957
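/*
 * acr_r352_reset_nopmu() - fallback reset path used when the boot falcon does
 * not manage the other LS falcons: redo the whole secure boot sequence
 * (shutdown + bootstrap), but only when FECS is part of the requested mask.
 */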
958 static int
959 acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
960 unsigned long falcon_mask)
961 {
962 int falcon;
963 int ret;
964
965
966
967
968
969 if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS)))
970 goto end;
971
972 ret = acr_r352_shutdown(acr, sb);
973 if (ret)
974 return ret;
975
976 ret = acr_r352_bootstrap(acr, sb);
977 if (ret)
978 return ret;
979
980 end:
981 for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
982 acr->falcon_state[falcon] = RESET;
983 }
984 return 0;
985 }
986
987
988
989
990
991
992
993
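/*
 * acr_r352_reset() - reset the falcons in @falcon_mask. Ensures secure boot
 * has run, then either falls back to acr_r352_reset_nopmu() when the boot
 * falcon is not managed, or asks the boot falcon (PMU or SEC2) to bootstrap
 * the requested falcons through its message queue.
 */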
994 static int
995 acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
996 unsigned long falcon_mask)
997 {
998 struct acr_r352 *acr = acr_r352(_acr);
999 struct nvkm_msgqueue *queue;
1000 int falcon;
1001 bool wpr_already_set = sb->wpr_set;
1002 int ret;
1003
1004
1005 ret = acr_r352_bootstrap(acr, sb);
1006 if (ret)
1007 return ret;
1008
1009
1010 if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) {
1011
1012 if (wpr_already_set)
1013 return acr_r352_reset_nopmu(acr, sb, falcon_mask);
1014
1015 else
1016 return ret;
1017 }
1018
1019 switch (_acr->boot_falcon) {
1020 case NVKM_SECBOOT_FALCON_PMU:
1021 queue = sb->subdev.device->pmu->queue;
1022 break;
1023 case NVKM_SECBOOT_FALCON_SEC2:
1024 queue = sb->subdev.device->sec2->queue;
1025 break;
1026 default:
1027 return -EINVAL;
1028 }
1029
1030
1031 for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END)
1032 nvkm_debug(&sb->subdev, "resetting %s falcon\n",
1033 nvkm_secboot_falcon_name[falcon]);
1034 ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask);
1035 if (ret) {
1036 nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret);
1037 return ret;
1038 }
1039 nvkm_debug(&sb->subdev, "falcon reset done\n");
1040
1041 return 0;
1042 }
1043
1044 static int
1045 acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend)
1046 {
1047 struct acr_r352 *acr = acr_r352(_acr);
1048
1049 return acr_r352_shutdown(acr, sb);
1050 }
1051
1052 static void
1053 acr_r352_dtor(struct nvkm_acr *_acr)
1054 {
1055 struct acr_r352 *acr = acr_r352(_acr);
1056
1057 nvkm_gpuobj_del(&acr->unload_blob);
1058
1059 if (_acr->boot_falcon != NVKM_SECBOOT_FALCON_PMU)
1060 kfree(acr->hsbl_unload_blob);
1061 kfree(acr->hsbl_blob);
1062 nvkm_gpuobj_del(&acr->load_blob);
1063 nvkm_gpuobj_del(&acr->ls_blob);
1064
1065 kfree(acr);
1066 }
1067
1068 static const struct acr_r352_lsf_func
1069 acr_r352_ls_fecs_func_0 = {
1070 .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
1071 .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
1072 };
1073
1074 const struct acr_r352_ls_func
1075 acr_r352_ls_fecs_func = {
1076 .load = acr_ls_ucode_load_fecs,
1077 .version_max = 0,
1078 .version = {
1079 &acr_r352_ls_fecs_func_0,
1080 }
1081 };
1082
1083 static const struct acr_r352_lsf_func
1084 acr_r352_ls_gpccs_func_0 = {
1085 .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
1086 .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
1087
1088 .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
1089 };
1090
1091 static const struct acr_r352_ls_func
1092 acr_r352_ls_gpccs_func = {
1093 .load = acr_ls_ucode_load_gpccs,
1094 .version_max = 0,
1095 .version = {
1096 &acr_r352_ls_gpccs_func_0,
1097 }
1098 };
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
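/*
 * struct acr_r352_pmu_bl_desc - PMU-specific bootloader descriptor. In
 * addition to the code/data DMA information it carries the location of the
 * command-line arguments (argc/argv) passed to the PMU firmware.
 */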
1117 struct acr_r352_pmu_bl_desc {
1118 u32 dma_idx;
1119 u32 code_dma_base;
1120 u32 code_size_total;
1121 u32 code_size_to_load;
1122 u32 code_entry_point;
1123 u32 data_dma_base;
1124 u32 data_size;
1125 u32 overlay_dma_base;
1126 u32 argc;
1127 u32 argv;
1128 u16 code_dma_base1;
1129 u16 data_dma_base1;
1130 u16 overlay_dma_base1;
1131 };
1132
1133
1134
1135
1136
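/*
 * acr_r352_generate_pmu_bl_desc() - generate the bootloader descriptor for the
 * PMU LS image. The command-line buffer is placed at the end of the PMU's
 * DMEM.
 */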
1137 static void
1138 acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
1139 const struct ls_ucode_img *img, u64 wpr_addr,
1140 void *_desc)
1141 {
1142 const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
1143 const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
1144 struct acr_r352_pmu_bl_desc *desc = _desc;
1145 u64 base;
1146 u64 addr_code;
1147 u64 addr_data;
1148 u32 addr_args;
1149
1150 base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
1151 addr_code = (base + pdesc->app_resident_code_offset) >> 8;
1152 addr_data = (base + pdesc->app_resident_data_offset) >> 8;
1153 addr_args = pmu->falcon->data.limit;
1154 addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
1155
1156 desc->dma_idx = FALCON_DMAIDX_UCODE;
1157 desc->code_dma_base = lower_32_bits(addr_code);
1158 desc->code_dma_base1 = upper_32_bits(addr_code);
1159 desc->code_size_total = pdesc->app_size;
1160 desc->code_size_to_load = pdesc->app_resident_code_size;
1161 desc->code_entry_point = pdesc->app_imem_entry;
1162 desc->data_dma_base = lower_32_bits(addr_data);
1163 desc->data_dma_base1 = upper_32_bits(addr_data);
1164 desc->data_size = pdesc->app_resident_data_size;
1165 desc->overlay_dma_base = lower_32_bits(addr_code);
1166 desc->overlay_dma_base1 = upper_32_bits(addr_code);
1167 desc->argc = 1;
1168 desc->argv = addr_args;
1169 }
1170
1171 static const struct acr_r352_lsf_func
1172 acr_r352_ls_pmu_func_0 = {
1173 .generate_bl_desc = acr_r352_generate_pmu_bl_desc,
1174 .bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
1175 };
1176
1177 static const struct acr_r352_ls_func
1178 acr_r352_ls_pmu_func = {
1179 .load = acr_ls_ucode_load_pmu,
1180 .post_run = acr_ls_pmu_post_run,
1181 .version_max = 0,
1182 .version = {
1183 &acr_r352_ls_pmu_func_0,
1184 }
1185 };
1186
1187 const struct acr_r352_func
1188 acr_r352_func = {
1189 .fixup_hs_desc = acr_r352_fixup_hs_desc,
1190 .generate_hs_bl_desc = acr_r352_generate_hs_bl_desc,
1191 .hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
1192 .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
1193 .ls_fill_headers = acr_r352_ls_fill_headers,
1194 .ls_write_wpr = acr_r352_ls_write_wpr,
1195 .ls_func = {
1196 [NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func,
1197 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func,
1198 [NVKM_SECBOOT_FALCON_PMU] = &acr_r352_ls_pmu_func,
1199 },
1200 };
1201
1202 static const struct nvkm_acr_func
1203 acr_r352_base_func = {
1204 .dtor = acr_r352_dtor,
1205 .fini = acr_r352_fini,
1206 .load = acr_r352_load,
1207 .reset = acr_r352_reset,
1208 };
1209
1210 struct nvkm_acr *
1211 acr_r352_new_(const struct acr_r352_func *func,
1212 enum nvkm_secboot_falcon boot_falcon,
1213 unsigned long managed_falcons)
1214 {
1215 struct acr_r352 *acr;
1216 int i;
1217
1218
1219 for_each_set_bit(i, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
1220 if (!func->ls_func[i])
1221 return ERR_PTR(-ENOTSUPP);
1222 }
1223
1224 acr = kzalloc(sizeof(*acr), GFP_KERNEL);
1225 if (!acr)
1226 return ERR_PTR(-ENOMEM);
1227
1228 acr->base.boot_falcon = boot_falcon;
1229 acr->base.managed_falcons = managed_falcons;
1230 acr->base.func = &acr_r352_base_func;
1231 acr->func = func;
1232
1233 return &acr->base;
1234 }
1235
1236 struct nvkm_acr *
1237 acr_r352_new(unsigned long managed_falcons)
1238 {
1239 return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU,
1240 managed_falcons);
1241 }