This source file includes the following definitions:
- amdgpu_device_get_pcie_replay_count
- amdgpu_device_is_px
- amdgpu_mm_rreg
- amdgpu_mm_rreg8
- amdgpu_mm_wreg8
- amdgpu_mm_wreg
- amdgpu_io_rreg
- amdgpu_io_wreg
- amdgpu_mm_rdoorbell
- amdgpu_mm_wdoorbell
- amdgpu_mm_rdoorbell64
- amdgpu_mm_wdoorbell64
- amdgpu_invalid_rreg
- amdgpu_invalid_wreg
- amdgpu_invalid_rreg64
- amdgpu_invalid_wreg64
- amdgpu_block_invalid_rreg
- amdgpu_block_invalid_wreg
- amdgpu_device_vram_scratch_init
- amdgpu_device_vram_scratch_fini
- amdgpu_device_program_register_sequence
- amdgpu_device_pci_config_reset
- amdgpu_device_doorbell_init
- amdgpu_device_doorbell_fini
- amdgpu_device_wb_fini
- amdgpu_device_wb_init
- amdgpu_device_wb_get
- amdgpu_device_wb_free
- amdgpu_device_resize_fb_bar
- amdgpu_device_need_post
- amdgpu_device_vga_set_decode
- amdgpu_device_check_block_size
- amdgpu_device_check_vm_size
- amdgpu_device_check_smu_prv_buffer_size
- amdgpu_device_check_arguments
- amdgpu_switcheroo_set_state
- amdgpu_switcheroo_can_switch
- amdgpu_device_ip_set_clockgating_state
- amdgpu_device_ip_set_powergating_state
- amdgpu_device_ip_get_clockgating_state
- amdgpu_device_ip_wait_for_idle
- amdgpu_device_ip_is_idle
- amdgpu_device_ip_get_ip_block
- amdgpu_device_ip_block_version_cmp
- amdgpu_device_ip_block_add
- amdgpu_device_enable_virtual_display
- amdgpu_device_parse_gpu_info_fw
- amdgpu_device_ip_early_init
- amdgpu_device_ip_hw_init_phase1
- amdgpu_device_ip_hw_init_phase2
- amdgpu_device_fw_loading
- amdgpu_device_ip_init
- amdgpu_device_fill_reset_magic
- amdgpu_device_check_vram_lost
- amdgpu_device_set_cg_state
- amdgpu_device_set_pg_state
- amdgpu_device_enable_mgpu_fan_boost
- amdgpu_device_ip_late_init
- amdgpu_device_ip_fini
- amdgpu_device_delayed_init_work_handler
- amdgpu_device_delay_enable_gfx_off
- amdgpu_device_ip_suspend_phase1
- amdgpu_device_ip_suspend_phase2
- amdgpu_device_ip_suspend
- amdgpu_device_ip_reinit_early_sriov
- amdgpu_device_ip_reinit_late_sriov
- amdgpu_device_ip_resume_phase1
- amdgpu_device_ip_resume_phase2
- amdgpu_device_ip_resume
- amdgpu_device_detect_sriov_bios
- amdgpu_device_asic_has_dc_support
- amdgpu_device_has_dc_support
- amdgpu_device_xgmi_reset_func
- amdgpu_device_init
- amdgpu_device_fini
- amdgpu_device_suspend
- amdgpu_device_resume
- amdgpu_device_ip_check_soft_reset
- amdgpu_device_ip_pre_soft_reset
- amdgpu_device_ip_need_full_reset
- amdgpu_device_ip_soft_reset
- amdgpu_device_ip_post_soft_reset
- amdgpu_device_recover_vram
- amdgpu_device_reset_sriov
- amdgpu_device_should_recover_gpu
- amdgpu_device_pre_asic_reset
- amdgpu_do_asic_reset
- amdgpu_device_lock_adev
- amdgpu_device_unlock_adev
- amdgpu_device_gpu_recover
- amdgpu_device_get_pcie_info

#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"NAVI10",
	"NAVI14",
	"NAVI12",
	"LAST",
};

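/**
 * amdgpu_device_get_pcie_replay_count - sysfs show callback for the
 * pcie_replay_count attribute
 *
 * Formats the total number of PCIe replays (NAKs) reported by the ASIC
 * into @buf.
 */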
static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
		amdgpu_device_get_pcie_replay_count, NULL);

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

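/**
 * amdgpu_device_is_px - Is the device a dGPU with HG/PX power control
 *
 * Returns true if the device is a dGPU with HG/PX power control,
 * false otherwise.
 */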
bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

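/**
 * amdgpu_mm_rreg - read a memory-mapped register
 *
 * Reads the register at dword offset @reg. SR-IOV guests go through the
 * KIQ unless AMDGPU_REGS_NO_KIQ is set; offsets beyond the directly
 * mapped MMIO range go through the MM_INDEX/MM_DATA window.
 */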
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (offset < adev->rmmio_size)
		return readb(adev->rmmio + offset);
	BUG();
}

void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

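/**
 * amdgpu_mm_wreg - write to a memory-mapped register
 *
 * Writes @v to the register at dword offset @reg, with the same KIQ and
 * MM_INDEX/MM_DATA handling as amdgpu_mm_rreg(). On VEGA10 and later, a
 * write to offset 0x5702C through the index/data pair is followed by a
 * 500 us delay (apparently a hardware quirk workaround).
 */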
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_wreg(adev, reg, v);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

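/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * Returns the value in doorbell aperture slot @index, or 0 (with an
 * error message) when the index is outside the mapped aperture. The
 * 32- and 64-bit doorbell accessors below follow the same pattern.
 */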
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

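/*
 * Default register access callbacks. They are installed in
 * amdgpu_device_init() and trip a BUG() if an IP block touches a
 * register family before registering its real accessors.
 */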
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

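/**
 * amdgpu_device_program_register_sequence - program an array of registers
 *
 * @registers: pointer to (reg, and_mask, or_mask) triples
 * @array_size: total number of dwords; must be a multiple of 3
 *
 * For each triple, the register is read, masked with ~and_mask, OR'd
 * with or_mask and written back (an and_mask of 0xffffffff writes
 * or_mask directly). Used to load "golden" register settings at init.
 */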
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

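/**
 * amdgpu_device_doorbell_init - map the doorbell aperture (BAR 2)
 *
 * Maps the doorbell BAR and computes how many doorbell slots are
 * usable; ASICs older than BONAIRE have no doorbells. Returns 0 on
 * success or a negative error code on failure.
 */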
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	amdgpu_asic_init_doorbell_index(adev);

	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     adev->doorbell_index.max_assignment + 1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	if (adev->asic_type >= CHIP_VEGA10)
		adev->doorbell.num_doorbells += 0x400;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

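/**
 * amdgpu_device_wb_init - allocate the writeback buffer
 *
 * Creates the GTT buffer the GPU uses to write back fence and status
 * data. Each of the AMDGPU_MAX_WB slots is 8 dwords wide. Returns 0 on
 * success or an error code on failure.
 */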
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

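/**
 * amdgpu_device_wb_get - allocate a writeback slot
 *
 * Finds a free slot in the writeback bitmap, marks it used and returns
 * its dword offset in @wb. Returns 0 on success or -EINVAL when all
 * slots are taken.
 */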
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3;
		return 0;
	} else {
		return -EINVAL;
	}
}

void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

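/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR0
 *
 * Attempts to resize BAR0 so all VRAM is CPU-visible. The doorbell BAR
 * is torn down and re-initialized around the resize, and memory
 * decoding is disabled while BARs are moved. Failure to resize is not
 * fatal; the driver keeps the original BAR size.
 */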
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	if (!res)
		return 0;

	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

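/**
 * amdgpu_device_need_post - check whether the ASIC needs to be posted
 *
 * Returns true when the ASIC has not been initialized (no VBIOS post at
 * boot, or a pending hardware reset) and the driver must post it before
 * use; returns false for SR-IOV VFs and already-posted cards.
 */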
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

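/**
 * amdgpu_device_check_arguments - validate module parameters
 *
 * Sanity-checks the module parameters (scheduler job count, GART/GTT
 * size, VM fragment size, lockup timeout) and clamps or resets invalid
 * values to their defaults.
 */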
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	int ret = 0;

	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	ret = amdgpu_device_get_job_timeout_settings(adev);
	if (ret) {
		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
		return ret;
	}

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	return ret;
}

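/**
 * amdgpu_switcheroo_set_state - vga_switcheroo power-state callback
 *
 * Resumes or suspends the device when the hybrid-graphics mux switches
 * it on or off; the OFF transition is ignored for PX devices.
 */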
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("amdgpu: switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}

struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			 ((ip_block->version->major == major) &&
			  (ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	case CHIP_VEGA20:
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			chip_name = "raven2";
		else if (adev->pdev->device == 0x15d8)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_RENOIR:
		chip_name = "renoir";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		if (hdr->version_minor >= 1) {
			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->gfx.config.num_sc_per_sh =
				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
			adev->gfx.config.num_packer_per_sc =
				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
		}
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
		if (hdr->version_minor == 2) {
			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
		}
#endif
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}

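/**
 * amdgpu_device_ip_early_init - run early init for all IP blocks
 *
 * Determines the ASIC family, registers the per-family IP block list,
 * loads the gpu_info firmware and calls each block's early_init hook.
 * Blocks that return -ENOENT or are masked out via ip_block_mask are
 * marked invalid and skipped from then on.
 */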
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		if (adev->asic_type == CHIP_RAVEN ||
		    adev->asic_type == CHIP_RENOIR)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->family = AMDGPU_FAMILY_NV;

		r = nv_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	amdgpu_amdkfd_device_probe(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return -EAGAIN;
	}

	adev->pm.pp_feature = amdgpu_pp_feature_mask;
	if (amdgpu_sriov_vf(adev))
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
			if (!amdgpu_get_bios(adev))
				return -EINVAL;

			r = amdgpu_atombios_init(adev);
			if (r) {
				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
				return r;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
			if (r) {
				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;
		}
	}

	return 0;
}

static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
{
	int r = 0;
	int i;
	uint32_t smu_version;

	if (adev->asic_type >= CHIP_VEGA10) {
		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
				continue;

			if (adev->ip_blocks[i].status.hw)
				break;

			if (adev->in_gpu_reset || adev->in_suspend) {
				r = adev->ip_blocks[i].version->funcs->resume(adev);
				if (r) {
					DRM_ERROR("resume of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				}
			} else {
				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
				if (r) {
					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				}
			}

			adev->ip_blocks[i].status.hw = true;
			break;
		}
	}

	r = amdgpu_pm_load_smu_firmware(adev, &smu_version);

	return r;
}

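/**
 * amdgpu_device_ip_init - run sw_init and hw_init for all IP blocks
 *
 * Calls sw_init for every valid block, sets up VRAM scratch, writeback
 * and the static CSA right after the GMC block, then brings up the
 * hardware in two phases with firmware loading in between.
 */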
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
	int i, r;

	r = amdgpu_ras_init(adev);
	if (r)
		return r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			goto init_failed;
		}
		adev->ip_blocks[i].status.sw = true;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_device_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				goto init_failed;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				goto init_failed;
			}
			r = amdgpu_device_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
				goto init_failed;
			}
			adev->ip_blocks[i].status.hw = true;

			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
							       AMDGPU_GEM_DOMAIN_VRAM,
							       AMDGPU_CSA_SIZE);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					goto init_failed;
				}
			}
		}
	}

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
		goto init_failed;
	}

	r = amdgpu_ucode_create_bo(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase1(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase2(adev);
	if (r)
		goto init_failed;

	if (adev->gmc.xgmi.num_physical_nodes > 1)
		amdgpu_xgmi_add_device(adev);
	amdgpu_amdkfd_device_init(adev);

init_failed:
	if (amdgpu_sriov_vf(adev)) {
		if (!r)
			amdgpu_virt_init_data_exchange(adev);
		amdgpu_virt_release_full_gpu(adev, true);
	}

	return r;
}

static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}

static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
				      enum amd_clockgating_state state)
{
	int i, j, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (j = 0; j < adev->num_ip_blocks; j++) {
		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     state);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
{
	int i, j, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (j = 0; j < adev->num_ip_blocks; j++) {
		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
										     state);
			if (r) {
				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}

static int amdgpu_device_enable_mgpu_fan_boost(void)
{
	struct amdgpu_gpu_instance *gpu_ins;
	struct amdgpu_device *adev;
	int i, ret = 0;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_dgpu < 2)
		goto out;

	for (i = 0; i < mgpu_info.num_dgpu; i++) {
		gpu_ins = &(mgpu_info.gpu_ins[i]);
		adev = gpu_ins->adev;
		if (!(adev->flags & AMD_IS_APU) &&
		    !gpu_ins->mgpu_fan_enabled &&
		    adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
			if (ret)
				break;

			gpu_ins->mgpu_fan_enabled = 1;
		}
	}

out:
	mutex_unlock(&mgpu_info.mutex);

	return ret;
}

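/**
 * amdgpu_device_ip_late_init - run late init for all IP blocks
 *
 * Calls each block's late_init hook, then enables clock- and power-
 * gating, records the reset magic and turns on multi-GPU fan boost
 * where applicable.
 */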
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
		adev->ip_blocks[i].status.late_initialized = true;
	}

	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);

	amdgpu_device_fill_reset_magic(adev);

	r = amdgpu_device_enable_mgpu_fan_boost();
	if (r)
		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);

	amdgpu_xgmi_set_pstate(adev, 0);

	return 0;
}

static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_ras_pre_fini(adev);

	if (adev->gmc.xgmi.num_physical_nodes > 1)
		amdgpu_xgmi_remove_device(adev);

	amdgpu_amdkfd_device_fini(adev);

	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_ucode_free_bo(adev);
			amdgpu_free_static_csa(&adev->virt.csa_obj);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_vram_scratch_fini(adev);
			amdgpu_ib_pool_fini(adev);
		}

		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	amdgpu_ras_fini(adev);

	if (amdgpu_sriov_vf(adev))
		if (amdgpu_virt_release_full_gpu(adev, false))
			DRM_ERROR("failed to release exclusive mode on fini\n");

	return 0;
}

static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, delayed_init_work.work);
	int r;

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);
}

static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);

	mutex_lock(&adev->gfx.gfx_off_mutex);
	if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
		if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
			adev->gfx.gfx_off_state = true;
	}
	mutex_unlock(&adev->gfx.gfx_off_mutex);
}

static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
			r = adev->ip_blocks[i].version->funcs->suspend(adev);
			if (r) {
				DRM_ERROR("suspend of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.hw = false;
		}
	}

	return 0;
}

static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
			continue;

		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.hw = false;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			if (is_support_sw_smu(adev)) {

			} else if (adev->powerplay.pp_funcs &&
				   adev->powerplay.pp_funcs->set_mp1_state) {
				r = adev->powerplay.pp_funcs->set_mp1_state(
					adev->powerplay.pp_handle,
					adev->mp1_state);
				if (r) {
					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
						  adev->mp1_state, r);
					return r;
				}
			}
		}

		adev->ip_blocks[i].status.hw = false;
	}

	return 0;
}

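/**
 * amdgpu_device_ip_suspend - run suspend for all IP blocks
 *
 * Suspends display hardware first (phase 1), then the remaining blocks
 * (phase 2). For SR-IOV VFs, full GPU access is requested from the
 * hypervisor around the sequence.
 */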
int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	r = amdgpu_device_ip_suspend_phase1(adev);
	if (r)
		return r;
	r = amdgpu_device_ip_suspend_phase2(adev);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return r;
}

static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_PSP,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			block->status.hw = false;
			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
			if (r)
				return r;
			block->status.hw = true;
		}
	}

	return 0;
}

static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_UVD,
		AMD_IP_BLOCK_TYPE_VCE
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid ||
			    block->status.hw)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
			if (r)
				return r;
			block->status.hw = true;
		}
	}

	return 0;
}

static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;
		}
	}

	return 0;
}

static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_device_ip_resume_phase1(adev);
	if (r)
		return r;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		return r;

	r = amdgpu_device_ip_resume_phase2(adev);

	return r;
}

static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		if (adev->is_atom_fw) {
			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		} else {
			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		}

		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
	}
}

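/**
 * amdgpu_device_asic_has_dc_support - can this ASIC run the DC display stack
 *
 * Bonaire/Kaveri/Kabini/Mullins only use DC when explicitly requested
 * with amdgpu.dc=1; newer DCE and DCN parts default to DC when it is
 * built in (and the matching DCN Kconfig option is enabled).
 */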
2486 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2487 {
2488 switch (asic_type) {
2489 #if defined(CONFIG_DRM_AMD_DC)
2490 case CHIP_BONAIRE:
2491 case CHIP_KAVERI:
2492 case CHIP_KABINI:
2493 case CHIP_MULLINS:
2494
2495
2496
2497
2498
2499
2500
2501 return amdgpu_dc > 0;
2502 case CHIP_HAWAII:
2503 case CHIP_CARRIZO:
2504 case CHIP_STONEY:
2505 case CHIP_POLARIS10:
2506 case CHIP_POLARIS11:
2507 case CHIP_POLARIS12:
2508 case CHIP_VEGAM:
2509 case CHIP_TONGA:
2510 case CHIP_FIJI:
2511 case CHIP_VEGA10:
2512 case CHIP_VEGA12:
2513 case CHIP_VEGA20:
2514 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2515 case CHIP_RAVEN:
2516 #endif
2517 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2518 case CHIP_NAVI10:
2519 case CHIP_NAVI14:
2520 case CHIP_NAVI12:
2521 #endif
2522 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2523 case CHIP_RENOIR:
2524 #endif
2525 return amdgpu_dc != 0;
2526 #endif
2527 default:
2528 return false;
2529 }
2530 }
2531
2532 /**
2533 * amdgpu_device_has_dc_support - check if dc is supported
2534 *
2535 * @adev: amdgpu_device pointer
2536 *
2537 * Returns true for supported, false for not supported
2538 */
2539 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2540 {
2541 if (amdgpu_sriov_vf(adev))
2542 return false;
2543
2544 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2545 }
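
/*
 * Illustrative sketch (simplified): callers use this helper to pick between
 * the legacy and DC display paths, as the teardown code later in this file
 * does:
 *
 *   if (amdgpu_device_has_dc_support(adev))
 *           drm_atomic_helper_shutdown(adev->ddev);    // DC / atomic path
 *   else
 *           drm_helper_force_disable_all(adev->ddev);  // legacy path
 */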
2546
2547
2548 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
2549 {
2550 struct amdgpu_device *adev =
2551 container_of(__work, struct amdgpu_device, xgmi_reset_work);
2552
2553 adev->asic_reset_res = amdgpu_asic_reset(adev);
2554 if (adev->asic_reset_res)
2555 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s\n",
2556 adev->asic_reset_res, adev->ddev->unique);
2557 }
2558
2559
2560 /**
2561 * amdgpu_device_init - initialize the driver
2562 *
2563 * @adev: amdgpu_device pointer
2564 * @ddev: drm dev pointer
2565 * @pdev: pci dev pointer
2566 * @flags: driver flags
2567 *
2568 * Initializes the driver info and hw (all asics).
2569 * Returns 0 for success or an error on failure.
2570 * Called at driver startup.
2571 */
2572 int amdgpu_device_init(struct amdgpu_device *adev,
2573 struct drm_device *ddev,
2574 struct pci_dev *pdev,
2575 uint32_t flags)
2576 {
2577 int r, i;
2578 bool runtime = false;
2579 u32 max_MBps;
2580
2581 adev->shutdown = false;
2582 adev->dev = &pdev->dev;
2583 adev->ddev = ddev;
2584 adev->pdev = pdev;
2585 adev->flags = flags;
2586 adev->asic_type = flags & AMD_ASIC_MASK;
2587 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
2588 if (amdgpu_emu_mode == 1)
2589 adev->usec_timeout *= 2;
2590 adev->gmc.gart_size = 512 * 1024 * 1024;
2591 adev->accel_working = false;
2592 adev->num_rings = 0;
2593 adev->mman.buffer_funcs = NULL;
2594 adev->mman.buffer_funcs_ring = NULL;
2595 adev->vm_manager.vm_pte_funcs = NULL;
2596 adev->vm_manager.vm_pte_num_rqs = 0;
2597 adev->gmc.gmc_funcs = NULL;
2598 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2599 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
2600
2601 adev->smc_rreg = &amdgpu_invalid_rreg;
2602 adev->smc_wreg = &amdgpu_invalid_wreg;
2603 adev->pcie_rreg = &amdgpu_invalid_rreg;
2604 adev->pcie_wreg = &amdgpu_invalid_wreg;
2605 adev->pciep_rreg = &amdgpu_invalid_rreg;
2606 adev->pciep_wreg = &amdgpu_invalid_wreg;
2607 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
2608 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
2609 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2610 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2611 adev->didt_rreg = &amdgpu_invalid_rreg;
2612 adev->didt_wreg = &amdgpu_invalid_wreg;
2613 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2614 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
2615 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2616 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2617
2618 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2619 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2620 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
2621
2622 /* mutex initialization is all done here so we can
2623 * recover on init failure from later driver initialization */
2624 atomic_set(&adev->irq.ih.lock, 0);
2625 mutex_init(&adev->firmware.mutex);
2626 mutex_init(&adev->pm.mutex);
2627 mutex_init(&adev->gfx.gpu_clock_mutex);
2628 mutex_init(&adev->srbm_mutex);
2629 mutex_init(&adev->gfx.pipe_reserve_mutex);
2630 mutex_init(&adev->gfx.gfx_off_mutex);
2631 mutex_init(&adev->grbm_idx_mutex);
2632 mutex_init(&adev->mn_lock);
2633 mutex_init(&adev->virt.vf_errors.lock);
2634 hash_init(adev->mn_hash);
2635 mutex_init(&adev->lock_reset);
2636 mutex_init(&adev->virt.dpm_mutex);
2637 mutex_init(&adev->psp.mutex);
2638
2639 r = amdgpu_device_check_arguments(adev);
2640 if (r)
2641 return r;
2642
2643 spin_lock_init(&adev->mmio_idx_lock);
2644 spin_lock_init(&adev->smc_idx_lock);
2645 spin_lock_init(&adev->pcie_idx_lock);
2646 spin_lock_init(&adev->uvd_ctx_idx_lock);
2647 spin_lock_init(&adev->didt_idx_lock);
2648 spin_lock_init(&adev->gc_cac_idx_lock);
2649 spin_lock_init(&adev->se_cac_idx_lock);
2650 spin_lock_init(&adev->audio_endpt_idx_lock);
2651 spin_lock_init(&adev->mm_stats.lock);
2652
2653 INIT_LIST_HEAD(&adev->shadow_list);
2654 mutex_init(&adev->shadow_list_lock);
2655
2656 INIT_LIST_HEAD(&adev->ring_lru_list);
2657 spin_lock_init(&adev->ring_lru_list_lock);
2658
2659 INIT_DELAYED_WORK(&adev->delayed_init_work,
2660 amdgpu_device_delayed_init_work_handler);
2661 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
2662 amdgpu_device_delay_enable_gfx_off);
2663
2664 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
2665
2666 adev->gfx.gfx_off_req_count = 1;
2667 adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
2668
2669 /* Registers mapping */
2670 /* TODO: block userspace mapping of io register */
2671 if (adev->asic_type >= CHIP_BONAIRE) {
2672 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2673 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2674 } else {
2675 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2676 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2677 }
2678
2679 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2680 if (adev->rmmio == NULL) {
2681 return -ENOMEM;
2682 }
2683 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2684 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2685
2686 /* io port mapping */
2687 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2688 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2689 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2690 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2691 break;
2692 }
2693 }
2694 if (adev->rio_mem == NULL)
2695 DRM_INFO("PCI I/O BAR is not found.\n");
2696
2697 /* enable PCIE atomic ops */
2698 r = pci_enable_atomic_ops_to_root(adev->pdev,
2699 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
2700 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
2701 if (r) {
2702 adev->have_atomics_support = false;
2703 DRM_INFO("PCIE atomic ops are not supported\n");
2704 } else {
2705 adev->have_atomics_support = true;
2706 }
2707
2708 amdgpu_device_get_pcie_info(adev);
2709
2710 if (amdgpu_mcbp)
2711 DRM_INFO("MCBP is enabled\n");
2712
2713 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
2714 adev->enable_mes = true;
2715
2716 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
2717 r = amdgpu_discovery_init(adev);
2718 if (r) {
2719 dev_err(adev->dev, "amdgpu_discovery_init failed\n");
2720 return r;
2721 }
2722 }
2723
2724 /* early init functions */
2725 r = amdgpu_device_ip_early_init(adev);
2726 if (r)
2727 return r;
2728
2729 /* doorbell bar mapping and doorbell index init */
2730 amdgpu_device_doorbell_init(adev);
2731
2732 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2733 /* this will fail for cards that aren't VGA class devices, just
2734 * ignore it */
2735 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
2736
2737 if (amdgpu_device_is_px(ddev))
2738 runtime = true;
2739 if (!pci_is_thunderbolt_attached(adev->pdev))
2740 vga_switcheroo_register_client(adev->pdev,
2741 &amdgpu_switcheroo_ops, runtime);
2742 if (runtime)
2743 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2744
2745 if (amdgpu_emu_mode == 1) {
2746 /* post the asic on emulation mode */
2747 emu_soc_asic_init(adev);
2748 goto fence_driver_init;
2749 }
2750
2751 /* detect if we are with an SRIOV vbios */
2752 amdgpu_device_detect_sriov_bios(adev);
2753
2754 /* check if we need to reset the asic
2755 * E.g., driver was not cleanly unloaded previously, etc.
2756 */
2757 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
2758 r = amdgpu_asic_reset(adev);
2759 if (r) {
2760 dev_err(adev->dev, "asic reset on init failed\n");
2761 goto failed;
2762 }
2763 }
2764
2765 /* Post card if necessary */
2766 if (amdgpu_device_need_post(adev)) {
2767 if (!adev->bios) {
2768 dev_err(adev->dev, "no vBIOS found\n");
2769 r = -EINVAL;
2770 goto failed;
2771 }
2772 DRM_INFO("GPU posting now...\n");
2773 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2774 if (r) {
2775 dev_err(adev->dev, "gpu post error!\n");
2776 goto failed;
2777 }
2778 }
2779
2780 if (adev->is_atom_fw) {
2781 /* Initialize clocks */
2782 r = amdgpu_atomfirmware_get_clock_info(adev);
2783 if (r) {
2784 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
2785 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2786 goto failed;
2787 }
2788 } else {
2789 /* Initialize clocks */
2790 r = amdgpu_atombios_get_clock_info(adev);
2791 if (r) {
2792 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
2793 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2794 goto failed;
2795 }
2796 /* init i2c buses */
2797 if (!amdgpu_device_has_dc_support(adev))
2798 amdgpu_atombios_i2c_init(adev);
2799 }
2800
2801 fence_driver_init:
2802 /* Fence driver */
2803 r = amdgpu_fence_driver_init(adev);
2804 if (r) {
2805 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
2806 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
2807 goto failed;
2808 }
2809
2810 /* init the mode config */
2811 drm_mode_config_init(adev->ddev);
2812
2813 r = amdgpu_device_ip_init(adev);
2814 if (r) {
2815 /* failed in exclusive mode due to timeout */
2816 if (amdgpu_sriov_vf(adev) &&
2817 !amdgpu_sriov_runtime(adev) &&
2818 amdgpu_virt_mmio_blocked(adev) &&
2819 !amdgpu_virt_wait_reset(adev)) {
2820 dev_err(adev->dev, "VF exclusive mode timeout\n");
2821 /* Don't send request since VF is inactive. */
2822 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2823 adev->virt.ops = NULL;
2824 r = -EAGAIN;
2825 goto failed;
2826 }
2827 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
2828 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
2829 if (amdgpu_virt_request_full_gpu(adev, false))
2830 amdgpu_virt_release_full_gpu(adev, false);
2831 goto failed;
2832 }
2833
2834 adev->accel_working = true;
2835
2836 amdgpu_vm_check_compute_bug(adev);
2837
2838 /* Initialize the buffer migration limit. */
2839 if (amdgpu_moverate >= 0)
2840 max_MBps = amdgpu_moverate;
2841 else
2842 max_MBps = 8; /* Allow 8 MB/s. */
2843 /* Get a log2 for easy divisions. */
2844 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2845
2846 amdgpu_fbdev_init(adev);
2847
2848 if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
2849 amdgpu_pm_virt_sysfs_init(adev);
2850
2851 r = amdgpu_pm_sysfs_init(adev);
2852 if (r)
2853 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2854
2855 r = amdgpu_ucode_sysfs_init(adev);
2856 if (r)
2857 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
2858
2859 r = amdgpu_debugfs_gem_init(adev);
2860 if (r)
2861 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
2862
2863 r = amdgpu_debugfs_regs_init(adev);
2864 if (r)
2865 DRM_ERROR("registering register debugfs failed (%d).\n", r);
2866
2867 r = amdgpu_debugfs_firmware_init(adev);
2868 if (r)
2869 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
2870
2871 r = amdgpu_debugfs_init(adev);
2872 if (r)
2873 DRM_ERROR("Creating debugfs files failed (%d).\n", r);
2874
2875 if (amdgpu_testing & 1) {
2876 if (adev->accel_working)
2877 amdgpu_test_moves(adev);
2878 else
2879 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2880 }
2881 if (amdgpu_benchmarking) {
2882 if (adev->accel_working)
2883 amdgpu_benchmark(adev, amdgpu_benchmarking);
2884 else
2885 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2886 }
2887
2888 /*
2889 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
2890 * Otherwise the mgpu fan boost feature will be skipped because the
2891 * gpu instance count would be too low.
2892 */
2893 amdgpu_register_gpu_instance(adev);
2894
2895 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2896 * explicit gating rather than handling it automatically.
2897 */
2898 r = amdgpu_device_ip_late_init(adev);
2899 if (r) {
2900 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
2901 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
2902 goto failed;
2903 }
2904
2905 /* must succeed. */
2906 amdgpu_ras_resume(adev);
2907
2908 queue_delayed_work(system_wq, &adev->delayed_init_work,
2909 msecs_to_jiffies(AMDGPU_RESUME_MS));
2910
2911 r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
2912 if (r) {
2913 dev_err(adev->dev, "Could not create pcie_replay_count\n");
2914 return r;
2915 }
2916
2917 if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
2918 r = amdgpu_pmu_init(adev);
2919 if (r)
2920 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
2921 }
2922 return 0;
2923
2924 failed:
2925 amdgpu_vf_error_trans_all(adev);
2926 if (runtime)
2927 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2928
2929 return r;
2930 }
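
/*
 * Illustrative sketch (hypothetical, simplified error handling): the PCI
 * probe path allocates the device and then hands everything to
 * amdgpu_device_init(), roughly:
 *
 *   adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
 *   if (!adev)
 *           return -ENOMEM;
 *
 *   r = amdgpu_device_init(adev, ddev, pdev, flags);
 *   if (r)
 *           goto err_free;  // hypothetical unwind label
 */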
2931
2932 /**
2933 * amdgpu_device_fini - tear down the driver
2934 *
2935 * @adev: amdgpu_device pointer
2936 *
2937 * Tear down the driver info (all asics).
2938 * Called at driver shutdown.
2939 */
2940 void amdgpu_device_fini(struct amdgpu_device *adev)
2941 {
2942 int r;
2943
2944 DRM_INFO("amdgpu: finishing device.\n");
2945 adev->shutdown = true;
2946
2947 amdgpu_irq_disable_all(adev);
2948 if (adev->mode_info.mode_config_initialized){
2949 if (!amdgpu_device_has_dc_support(adev))
2950 drm_helper_force_disable_all(adev->ddev);
2951 else
2952 drm_atomic_helper_shutdown(adev->ddev);
2953 }
2954 amdgpu_fence_driver_fini(adev);
2955 amdgpu_pm_sysfs_fini(adev);
2956 amdgpu_fbdev_fini(adev);
2957 r = amdgpu_device_ip_fini(adev);
2958 if (adev->firmware.gpu_info_fw) {
2959 release_firmware(adev->firmware.gpu_info_fw);
2960 adev->firmware.gpu_info_fw = NULL;
2961 }
2962 adev->accel_working = false;
2963 cancel_delayed_work_sync(&adev->delayed_init_work);
2964 /* free i2c buses */
2965 if (!amdgpu_device_has_dc_support(adev))
2966 amdgpu_i2c_fini(adev);
2967
2968 if (amdgpu_emu_mode != 1)
2969 amdgpu_atombios_fini(adev);
2970
2971 kfree(adev->bios);
2972 adev->bios = NULL;
2973 if (!pci_is_thunderbolt_attached(adev->pdev))
2974 vga_switcheroo_unregister_client(adev->pdev);
2975 if (adev->flags & AMD_IS_PX)
2976 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2977 vga_client_register(adev->pdev, NULL, NULL, NULL);
2978 if (adev->rio_mem)
2979 pci_iounmap(adev->pdev, adev->rio_mem);
2980 adev->rio_mem = NULL;
2981 iounmap(adev->rmmio);
2982 adev->rmmio = NULL;
2983 amdgpu_device_doorbell_fini(adev);
2984 if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
2985 amdgpu_pm_virt_sysfs_fini(adev);
2986
2987 amdgpu_debugfs_regs_cleanup(adev);
2988 device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
2989 amdgpu_ucode_sysfs_fini(adev);
2990 if (IS_ENABLED(CONFIG_PERF_EVENTS))
2991 amdgpu_pmu_fini(adev);
2992 amdgpu_debugfs_preempt_cleanup(adev);
2993 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
2994 amdgpu_discovery_fini(adev);
2995 }
2996
2997
2998 /**
2999 * amdgpu_device_suspend - initiate device suspend
3000 *
3001 * @dev: drm dev pointer
3002 * @suspend: suspend state
3003 * @fbcon: notify the fbdev of suspend
3004 *
3005 * Puts the hw in the suspend state (all asics).
3006 * Returns 0 for success or an error on failure.
3007 * Called at driver suspend.
3008 */
3009
3010
3011
3012 int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
3013 {
3014 struct amdgpu_device *adev;
3015 struct drm_crtc *crtc;
3016 struct drm_connector *connector;
3017 int r;
3018
3019 if (dev == NULL || dev->dev_private == NULL) {
3020 return -ENODEV;
3021 }
3022
3023 adev = dev->dev_private;
3024
3025 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3026 return 0;
3027
3028 adev->in_suspend = true;
3029 drm_kms_helper_poll_disable(dev);
3030
3031 if (fbcon)
3032 amdgpu_fbdev_set_suspend(adev, 1);
3033
3034 cancel_delayed_work_sync(&adev->delayed_init_work);
3035
3036 if (!amdgpu_device_has_dc_support(adev)) {
3037 /* turn off display hw */
3038 drm_modeset_lock_all(dev);
3039 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3040 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
3041 }
3042 drm_modeset_unlock_all(dev);
3043 /* unpin the front buffers and cursors */
3044 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3045 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3046 struct drm_framebuffer *fb = crtc->primary->fb;
3047 struct amdgpu_bo *robj;
3048
3049 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3050 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3051 r = amdgpu_bo_reserve(aobj, true);
3052 if (r == 0) {
3053 amdgpu_bo_unpin(aobj);
3054 amdgpu_bo_unreserve(aobj);
3055 }
3056 }
3057
3058 if (fb == NULL || fb->obj[0] == NULL) {
3059 continue;
3060 }
3061 robj = gem_to_amdgpu_bo(fb->obj[0]);
3062
3063 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
3064 r = amdgpu_bo_reserve(robj, true);
3065 if (r == 0) {
3066 amdgpu_bo_unpin(robj);
3067 amdgpu_bo_unreserve(robj);
3068 }
3069 }
3070 }
3071 }
3072
3073 amdgpu_ras_suspend(adev);
3074
3075 r = amdgpu_device_ip_suspend_phase1(adev);
3076
3077 amdgpu_amdkfd_suspend(adev);
3078
3079 /* evict vram memory */
3080 amdgpu_bo_evict_vram(adev);
3081
3082 amdgpu_fence_driver_suspend(adev);
3083
3084 r = amdgpu_device_ip_suspend_phase2(adev);
3085
3086 /* evict remaining vram memory.
3087 * This second call to evict vram is to evict the gart page table
3088 * using the CPU.
3089 */
3090 amdgpu_bo_evict_vram(adev);
3091
3092 pci_save_state(dev->pdev);
3093 if (suspend) {
3094 /* Shut down the device */
3095 pci_disable_device(dev->pdev);
3096 pci_set_power_state(dev->pdev, PCI_D3hot);
3097 } else {
3098 r = amdgpu_asic_reset(adev);
3099 if (r)
3100 DRM_ERROR("amdgpu asic reset failed\n");
3101 }
3102
3103 return 0;
3104 }
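
/*
 * Illustrative sketch (hypothetical wrapper name): the system-sleep PM
 * callback reduces to a thin wrapper around this helper:
 *
 *   static int my_pmops_suspend(struct device *dev)
 *   {
 *           struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *           return amdgpu_device_suspend(drm_dev, true, true);
 *   }
 */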
3105
3106 /**
3107 * amdgpu_device_resume - initiate device resume
3108 *
3109 * @dev: drm dev pointer
3110 * @resume: resume state
3111 * @fbcon: notify the fbdev of resume
3112 *
3113 * Bring the hw back to operating state (all asics).
3114 * Returns 0 for success or an error on failure.
3115 * Called at driver resume.
3116 */
3117 int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
3118 {
3119 struct drm_connector *connector;
3120 struct amdgpu_device *adev = dev->dev_private;
3121 struct drm_crtc *crtc;
3122 int r = 0;
3123
3124 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3125 return 0;
3126
3127 if (resume) {
3128 pci_set_power_state(dev->pdev, PCI_D0);
3129 pci_restore_state(dev->pdev);
3130 r = pci_enable_device(dev->pdev);
3131 if (r)
3132 return r;
3133 }
3134
3135 /* post card */
3136 if (amdgpu_device_need_post(adev)) {
3137 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
3138 if (r)
3139 DRM_ERROR("amdgpu asic init failed\n");
3140 }
3141
3142 r = amdgpu_device_ip_resume(adev);
3143 if (r) {
3144 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
3145 return r;
3146 }
3147 amdgpu_fence_driver_resume(adev);
3148
3149
3150 r = amdgpu_device_ip_late_init(adev);
3151 if (r)
3152 return r;
3153
3154 queue_delayed_work(system_wq, &adev->delayed_init_work,
3155 msecs_to_jiffies(AMDGPU_RESUME_MS));
3156
3157 if (!amdgpu_device_has_dc_support(adev)) {
3158 /* pin cursors */
3159 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3160 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3161
3162 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3163 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3164 r = amdgpu_bo_reserve(aobj, true);
3165 if (r == 0) {
3166 r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
3167 if (r != 0)
3168 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
3169 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
3170 amdgpu_bo_unreserve(aobj);
3171 }
3172 }
3173 }
3174 }
3175 r = amdgpu_amdkfd_resume(adev);
3176 if (r)
3177 return r;
3178
3179 /* Make sure IB tests flushed */
3180 flush_delayed_work(&adev->delayed_init_work);
3181
3182 /* blat the mode back in */
3183 if (fbcon) {
3184 if (!amdgpu_device_has_dc_support(adev)) {
3185 /* pre DCE11 */
3186 drm_helper_resume_force_mode(dev);
3187
3188 /* turn on display hw */
3189 drm_modeset_lock_all(dev);
3190 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3191 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
3192 }
3193 drm_modeset_unlock_all(dev);
3194 }
3195 amdgpu_fbdev_set_suspend(adev, 0);
3196 }
3197
3198 drm_kms_helper_poll_enable(dev);
3199
3200 amdgpu_ras_resume(adev);
3201
3202 /*
3203 * Most of the connector probing functions try to acquire runtime pm
3204 * refs to ensure that the GPU is powered on when connector polling is
3205 * performed. Since we're calling this from a runtime PM callback,
3206 * trying to acquire rpm refs will cause us to deadlock.
3207 *
3208 * Since we're guaranteed to be holding the rpm lock, it's safe to
3209 * temporarily disable the rpm helpers so this doesn't deadlock us.
3210 */
3211 #ifdef CONFIG_PM
3212 dev->dev->power.disable_depth++;
3213 #endif
3214 if (!amdgpu_device_has_dc_support(adev))
3215 drm_helper_hpd_irq_event(dev);
3216 else
3217 drm_kms_helper_hotplug_event(dev);
3218 #ifdef CONFIG_PM
3219 dev->dev->power.disable_depth--;
3220 #endif
3221 adev->in_suspend = false;
3222
3223 return 0;
3224 }
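
/*
 * Illustrative sketch (hypothetical wrapper name): the matching resume-side
 * PM callback mirrors the suspend wrapper shown earlier:
 *
 *   static int my_pmops_resume(struct device *dev)
 *   {
 *           struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *           return amdgpu_device_resume(drm_dev, true, true);
 *   }
 */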
3225
3226 /**
3227 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3228 *
3229 * @adev: amdgpu_device pointer
3230 *
3231 * The list of all the hardware IPs that make up the asic is walked and
3232 * the check_soft_reset callbacks are run. check_soft_reset determines
3233 * if the asic is still hung or not.
3234 * Returns true if any of the IPs are still in a hung state, false if not.
3235 */
3236 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3237 {
3238 int i;
3239 bool asic_hang = false;
3240
3241 if (amdgpu_sriov_vf(adev))
3242 return true;
3243
3244 if (amdgpu_asic_need_full_reset(adev))
3245 return true;
3246
3247 for (i = 0; i < adev->num_ip_blocks; i++) {
3248 if (!adev->ip_blocks[i].status.valid)
3249 continue;
3250 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3251 adev->ip_blocks[i].status.hang =
3252 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3253 if (adev->ip_blocks[i].status.hang) {
3254 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3255 asic_hang = true;
3256 }
3257 }
3258 return asic_hang;
3259 }
3260
3261 /**
3262 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3263 *
3264 * @adev: amdgpu_device pointer
3265 *
3266 * The list of all the hardware IPs that make up the asic is walked and the
3267 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
3268 * handles any IP specific hardware or software state changes that are
3269 * necessary for a soft reset to succeed.
3270 * Returns 0 on success, negative error code on failure.
3271 */
3272 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3273 {
3274 int i, r = 0;
3275
3276 for (i = 0; i < adev->num_ip_blocks; i++) {
3277 if (!adev->ip_blocks[i].status.valid)
3278 continue;
3279 if (adev->ip_blocks[i].status.hang &&
3280 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3281 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3282 if (r)
3283 return r;
3284 }
3285 }
3286
3287 return 0;
3288 }
3289
3290 /**
3291 * amdgpu_device_ip_need_full_reset - check for full asic reset
3292 *
3293 * @adev: amdgpu_device pointer
3294 *
3295 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
3296 * reset is necessary to recover.
3297 * Returns true if a full asic reset is required, false if not.
3298 */
3299 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3300 {
3301 int i;
3302
3303 if (amdgpu_asic_need_full_reset(adev))
3304 return true;
3305
3306 for (i = 0; i < adev->num_ip_blocks; i++) {
3307 if (!adev->ip_blocks[i].status.valid)
3308 continue;
3309 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3310 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3311 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3312 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3313 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3314 if (adev->ip_blocks[i].status.hang) {
3315 DRM_INFO("Some blocks need full reset!\n");
3316 return true;
3317 }
3318 }
3319 }
3320 return false;
3321 }
3322
3323 /**
3324 * amdgpu_device_ip_soft_reset - do a soft reset
3325 *
3326 * @adev: amdgpu_device pointer
3327 *
3328 * The list of all the hardware IPs that make up the asic is walked and the
3329 * soft_reset callbacks are run if the block is hung. soft_reset handles any
3330 * IP specific hardware or software state changes that are necessary for a
3331 * soft reset to succeed.
3332 * Returns 0 on success, negative error code on failure.
3333 */
3334 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3335 {
3336 int i, r = 0;
3337
3338 for (i = 0; i < adev->num_ip_blocks; i++) {
3339 if (!adev->ip_blocks[i].status.valid)
3340 continue;
3341 if (adev->ip_blocks[i].status.hang &&
3342 adev->ip_blocks[i].version->funcs->soft_reset) {
3343 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3344 if (r)
3345 return r;
3346 }
3347 }
3348
3349 return 0;
3350 }
3351
3352 /**
3353 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3354 *
3355 * @adev: amdgpu_device pointer
3356 *
3357 * The list of all the hardware IPs that make up the asic is walked and the
3358 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
3359 * handles any IP specific hardware or software state changes that are
3360 * necessary after the IP has been soft reset.
3361 * Returns 0 on success, negative error code on failure.
3362 */
3363 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
3364 {
3365 int i, r = 0;
3366
3367 for (i = 0; i < adev->num_ip_blocks; i++) {
3368 if (!adev->ip_blocks[i].status.valid)
3369 continue;
3370 if (adev->ip_blocks[i].status.hang &&
3371 adev->ip_blocks[i].version->funcs->post_soft_reset)
3372 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
3373 if (r)
3374 return r;
3375 }
3376
3377 return 0;
3378 }
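
/*
 * Illustrative sketch (hypothetical, simplified): an IP block opts into the
 * four-stage soft-reset flow above by filling in the corresponding
 * amd_ip_funcs hooks; the walkers only call a hook when the block reported
 * itself hung:
 *
 *   static bool my_block_check_soft_reset(void *handle)
 *   {
 *           // The result is cached in ip_blocks[i].status.hang by
 *           // amdgpu_device_ip_check_soft_reset() above.
 *           return my_block_read_hang_status(handle);  // hypothetical helper
 *   }
 *
 *   static const struct amd_ip_funcs my_block_ip_funcs = {
 *           .check_soft_reset = my_block_check_soft_reset,
 *           // .pre_soft_reset, .soft_reset, .post_soft_reset ...
 *   };
 */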
3379
3380 /**
3381 * amdgpu_device_recover_vram - Recover some VRAM contents
3382 *
3383 * @adev: amdgpu_device pointer
3384 *
3385 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
3386 * restore things like GPUVM page tables after a GPU reset where
3387 * the contents of VRAM might be lost.
3388 *
3389 * Returns:
3390 * 0 on success, negative error code on failure.
3391 */
3392 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
3393 {
3394 struct dma_fence *fence = NULL, *next = NULL;
3395 struct amdgpu_bo *shadow;
3396 long r = 1, tmo;
3397
3398 if (amdgpu_sriov_runtime(adev))
3399 tmo = msecs_to_jiffies(8000);
3400 else
3401 tmo = msecs_to_jiffies(100);
3402
3403 DRM_INFO("recover vram bo from shadow start\n");
3404 mutex_lock(&adev->shadow_list_lock);
3405 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
3406
3407 /* No need to recover an evicted BO */
3408 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
3409 shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
3410 shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
3411 continue;
3412
3413 r = amdgpu_bo_restore_shadow(shadow, &next);
3414 if (r)
3415 break;
3416
3417 if (fence) {
3418 tmo = dma_fence_wait_timeout(fence, false, tmo);
3419 dma_fence_put(fence);
3420 fence = next;
3421 if (tmo == 0) {
3422 r = -ETIMEDOUT;
3423 break;
3424 } else if (tmo < 0) {
3425 r = tmo;
3426 break;
3427 }
3428 } else {
3429 fence = next;
3430 }
3431 }
3432 mutex_unlock(&adev->shadow_list_lock);
3433
3434 if (fence)
3435 tmo = dma_fence_wait_timeout(fence, false, tmo);
3436 dma_fence_put(fence);
3437
3438 if (r < 0 || tmo <= 0) {
3439 DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
3440 return -EIO;
3441 }
3442
3443 DRM_INFO("recover vram bo from shadow done\n");
3444 return 0;
3445 }
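
/*
 * Illustrative sketch (simplified): buffers become recoverable here by being
 * created with a GTT shadow, so a reset can restore them from system memory.
 * Roughly:
 *
 *   struct amdgpu_bo_param bp = { 0 };
 *
 *   bp.flags |= AMDGPU_GEM_CREATE_SHADOW;  // allocate a GTT shadow copy
 *   r = amdgpu_bo_create(adev, &bp, &bo);  // shadow is tracked on adev->shadow_list
 */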
3446
3447 /**
3448 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
3449 *
3450 * @adev: amdgpu device pointer
3451 * @from_hypervisor: request from hypervisor
3452 *
3453 * Do a VF FLR and reinitialize the ASIC.
3454 * Returns 0 on success, negative error code on failure.
3455 */
3456
3457 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3458 bool from_hypervisor)
3459 {
3460 int r;
3461
3462 if (from_hypervisor)
3463 r = amdgpu_virt_request_full_gpu(adev, true);
3464 else
3465 r = amdgpu_virt_reset_gpu(adev);
3466 if (r)
3467 return r;
3468
3469 /* Resume IP prior to SMC */
3470 r = amdgpu_device_ip_reinit_early_sriov(adev);
3471 if (r)
3472 goto error;
3473
3474 /* we need to recover gart prior to running SMC/CP/SDMA resume */
3475 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
3476
3477 r = amdgpu_device_fw_loading(adev);
3478 if (r)
3479 return r;
3480
3481 /* now we are okay to resume SMC/CP/SDMA */
3482 r = amdgpu_device_ip_reinit_late_sriov(adev);
3483 if (r)
3484 goto error;
3485
3486 amdgpu_irq_gpu_reset_resume_helper(adev);
3487 r = amdgpu_ib_ring_tests(adev);
3488 amdgpu_amdkfd_post_reset(adev);
3489
3490 error:
3491 amdgpu_virt_init_data_exchange(adev);
3492 amdgpu_virt_release_full_gpu(adev, true);
3493 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3494 amdgpu_inc_vram_lost(adev);
3495 r = amdgpu_device_recover_vram(adev);
3496 }
3497
3498 return r;
3499 }
3500 /**
3501 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
3502 *
3503 * @adev: amdgpu device pointer
3504 *
3505 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to
3506 * recover a hung GPU.
3507 */
3508
3509 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
3510 {
3511 if (!amdgpu_device_ip_check_soft_reset(adev)) {
3512 DRM_INFO("Timeout, but no hardware hang detected.\n");
3513 return false;
3514 }
3515
3516 if (amdgpu_gpu_recovery == 0)
3517 goto disabled;
3518
3519 if (amdgpu_sriov_vf(adev))
3520 return true;
3521
3522 if (amdgpu_gpu_recovery == -1) {
3523 switch (adev->asic_type) {
3524 case CHIP_BONAIRE:
3525 case CHIP_HAWAII:
3526 case CHIP_TOPAZ:
3527 case CHIP_TONGA:
3528 case CHIP_FIJI:
3529 case CHIP_POLARIS10:
3530 case CHIP_POLARIS11:
3531 case CHIP_POLARIS12:
3532 case CHIP_VEGAM:
3533 case CHIP_VEGA20:
3534 case CHIP_VEGA10:
3535 case CHIP_VEGA12:
3536 case CHIP_RAVEN:
3537 break;
3538 default:
3539 goto disabled;
3540 }
3541 }
3542
3543 return true;
3544
3545 disabled:
3546 DRM_INFO("GPU recovery disabled.\n");
3547 return false;
3548 }
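
/*
 * Usage note: the amdgpu_gpu_recovery value consulted above is the driver's
 * gpu_recovery module parameter (-1 = automatic per-ASIC policy as listed,
 * 0 = disabled, 1 = enabled), so recovery can be forced from the command
 * line, e.g.:
 *
 *   modprobe amdgpu gpu_recovery=1
 */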
3549
3550
3551 static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
3552 struct amdgpu_job *job,
3553 bool *need_full_reset_arg)
3554 {
3555 int i, r = 0;
3556 bool need_full_reset = *need_full_reset_arg;
3557
3558 /* block all schedulers and reset given job's ring */
3559 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3560 struct amdgpu_ring *ring = adev->rings[i];
3561
3562 if (!ring || !ring->sched.thread)
3563 continue;
3564
3565 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3566 amdgpu_fence_driver_force_completion(ring);
3567 }
3568
3569 if (job)
3570 drm_sched_increase_karma(&job->base);
3571
3572 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
3573 if (!amdgpu_sriov_vf(adev)) {
3574
3575 if (!need_full_reset)
3576 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
3577
3578 if (!need_full_reset) {
3579 amdgpu_device_ip_pre_soft_reset(adev);
3580 r = amdgpu_device_ip_soft_reset(adev);
3581 amdgpu_device_ip_post_soft_reset(adev);
3582 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
3583 DRM_INFO("soft reset failed, will fallback to full reset!\n");
3584 need_full_reset = true;
3585 }
3586 }
3587
3588 if (need_full_reset)
3589 r = amdgpu_device_ip_suspend(adev);
3590
3591 *need_full_reset_arg = need_full_reset;
3592 }
3593
3594 return r;
3595 }
3596
3597 static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
3598 struct list_head *device_list_handle,
3599 bool *need_full_reset_arg)
3600 {
3601 struct amdgpu_device *tmp_adev = NULL;
3602 bool need_full_reset = *need_full_reset_arg, vram_lost = false;
3603 int r = 0;
3604
3605 /*
3606 * ASIC reset has to be done on all XGMI hive nodes ASAP
3607 * to allow proper links negotiation in FW (within 1 sec).
3608 */
3609 if (need_full_reset) {
3610 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3611 /* For XGMI run all resets in parallel to speed up the process */
3612 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
3613 if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
3614 r = -EALREADY;
3615 } else
3616 r = amdgpu_asic_reset(tmp_adev);
3617
3618 if (r) {
3619 DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
3620 r, tmp_adev->ddev->unique);
3621 break;
3622 }
3623 }
3624
3625 /* For XGMI wait for all resets to complete before proceeding */
3626 if (!r) {
3627 list_for_each_entry(tmp_adev, device_list_handle,
3628 gmc.xgmi.head) {
3629 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
3630 flush_work(&tmp_adev->xgmi_reset_work);
3631 r = tmp_adev->asic_reset_res;
3632 if (r)
3633 break;
3634 }
3635 }
3636
3637 list_for_each_entry(tmp_adev, device_list_handle,
3638 gmc.xgmi.head) {
3639 amdgpu_ras_reserve_bad_pages(tmp_adev);
3640 }
3641 }
3642 }
3643
3644
3645 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3646 if (need_full_reset) {
3647 /* post card */
3648 if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
3649 DRM_WARN("asic atom init failed!");
3650
3651 if (!r) {
3652 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
3653 r = amdgpu_device_ip_resume_phase1(tmp_adev);
3654 if (r)
3655 goto out;
3656
3657 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
3658 if (vram_lost) {
3659 DRM_INFO("VRAM is lost due to GPU reset!\n");
3660 amdgpu_inc_vram_lost(tmp_adev);
3661 }
3662
3663 r = amdgpu_gtt_mgr_recover(
3664 &tmp_adev->mman.bdev.man[TTM_PL_TT]);
3665 if (r)
3666 goto out;
3667
3668 r = amdgpu_device_fw_loading(tmp_adev);
3669 if (r)
3670 return r;
3671
3672 r = amdgpu_device_ip_resume_phase2(tmp_adev);
3673 if (r)
3674 goto out;
3675
3676 if (vram_lost)
3677 amdgpu_device_fill_reset_magic(tmp_adev);
3678
3679 /*
3680 * Add this ASIC as tracked, as the reset has already
3681 * completed successfully.
3682 */
3683 amdgpu_register_gpu_instance(tmp_adev);
3684
3685 r = amdgpu_device_ip_late_init(tmp_adev);
3686 if (r)
3687 goto out;
3688
3689 /* must succeed. */
3690 amdgpu_ras_resume(tmp_adev);
3691
3692 /* Update PSP FW topology after reset */
3693 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
3694 r = amdgpu_xgmi_update_topology(hive, tmp_adev);
3695 }
3696 }
3697
3698
3699 out:
3700 if (!r) {
3701 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
3702 r = amdgpu_ib_ring_tests(tmp_adev);
3703 if (r) {
3704 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
3705 r = amdgpu_device_ip_suspend(tmp_adev);
3706 need_full_reset = true;
3707 r = -EAGAIN;
3708 goto end;
3709 }
3710 }
3711
3712 if (!r)
3713 r = amdgpu_device_recover_vram(tmp_adev);
3714 else
3715 tmp_adev->asic_reset_res = r;
3716 }
3717
3718 end:
3719 *need_full_reset_arg = need_full_reset;
3720 return r;
3721 }
3722
3723 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
3724 {
3725 if (trylock) {
3726 if (!mutex_trylock(&adev->lock_reset))
3727 return false;
3728 } else
3729 mutex_lock(&adev->lock_reset);
3730
3731 atomic_inc(&adev->gpu_reset_counter);
3732 adev->in_gpu_reset = 1;
3733 switch (amdgpu_asic_reset_method(adev)) {
3734 case AMD_RESET_METHOD_MODE1:
3735 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
3736 break;
3737 case AMD_RESET_METHOD_MODE2:
3738 adev->mp1_state = PP_MP1_STATE_RESET;
3739 break;
3740 default:
3741 adev->mp1_state = PP_MP1_STATE_NONE;
3742 break;
3743 }
3744 /* Block kfd: SRIOV would do it separately */
3745 if (!amdgpu_sriov_vf(adev))
3746 amdgpu_amdkfd_pre_reset(adev);
3747
3748 return true;
3749 }
3750
3751 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
3752 {
3753 /* unlock kfd: SRIOV would do it separately */
3754 if (!amdgpu_sriov_vf(adev))
3755 amdgpu_amdkfd_post_reset(adev);
3756 amdgpu_vf_error_trans_all(adev);
3757 adev->mp1_state = PP_MP1_STATE_NONE;
3758 adev->in_gpu_reset = 0;
3759 mutex_unlock(&adev->lock_reset);
3760 }
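
/*
 * Illustrative sketch (simplified): every reset path brackets its work with
 * the lock/unlock pair above, roughly:
 *
 *   if (!amdgpu_device_lock_adev(adev, trylock))
 *           return 0;  // another reset already owns this device
 *
 *   ...pre-reset, reset and recovery steps...
 *
 *   amdgpu_device_unlock_adev(adev);
 */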
3761
3762
3763
3764 /**
3765 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
3766 *
3767 * @adev: amdgpu device pointer
3768 * @job: which job triggered the hang
3769 *
3770 * Attempt to reset the GPU if it has hung (all asics). Tries a soft
3771 * reset first and falls back to a full reset, then reinitializes the
3772 * ASIC. Returns 0 on success, negative error code on failure.
3773 */
3774 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
3775 struct amdgpu_job *job)
3776 {
3777 struct list_head device_list, *device_list_handle = NULL;
3778 bool need_full_reset, job_signaled;
3779 struct amdgpu_hive_info *hive = NULL;
3780 struct amdgpu_device *tmp_adev = NULL;
3781 int i, r = 0;
3782
3783 need_full_reset = job_signaled = false;
3784 INIT_LIST_HEAD(&device_list);
3785
3786 dev_info(adev->dev, "GPU reset begin!\n");
3787
3788 cancel_delayed_work_sync(&adev->delayed_init_work);
3789
3790 hive = amdgpu_get_xgmi_hive(adev, false);
3791
3792 /*
3793 * Here we trylock to avoid a chain of resets executing from either
3794 * jobs on different adevs in the XGMI hive or jobs on different
3795 * schedulers for the same device while this TO handler is running.
3796 * We always reset all schedulers for a device and all devices in an
3797 * XGMI hive, so that should take care of them too.
3798 */
3799
3800 if (hive && !mutex_trylock(&hive->reset_lock)) {
3801 DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
3802 job ? job->base.id : -1, hive->hive_id);
3803 return 0;
3804 }
3805
3806 /* Start with adev pre asic reset first for soft reset check. */
3807 if (!amdgpu_device_lock_adev(adev, !hive)) {
3808 DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
3809 job ? job->base.id : -1);
3810 return 0;
3811 }
3812
3813 /* Build list of devices to reset */
3814 if (adev->gmc.xgmi.num_physical_nodes > 1) {
3815 if (!hive) {
3816 amdgpu_device_unlock_adev(adev);
3817 return -ENODEV;
3818 }
3819
3820 /*
3821 * In case we are in XGMI hive mode, device reset is done for all
3822 * the nodes in the hive to retrieve and reset the saved job's
3823 * guilty status.
3824 */
3825 device_list_handle = &hive->device_list;
3826 } else {
3827 list_add_tail(&adev->gmc.xgmi.head, &device_list);
3828 device_list_handle = &device_list;
3829 }
3830
3831 /*
3832 * Mark these ASICs to be reset as untracked first,
3833 * and add them back after reset completes.
3834 */
3835 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
3836 amdgpu_unregister_gpu_instance(tmp_adev);
3837
3838
3839 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3840 /* disable ras on ALL IPs */
3841 if (amdgpu_device_ip_need_full_reset(tmp_adev))
3842 amdgpu_ras_suspend(tmp_adev);
3843
3844 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3845 struct amdgpu_ring *ring = tmp_adev->rings[i];
3846
3847 if (!ring || !ring->sched.thread)
3848 continue;
3849
3850 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
3851 }
3852 }
3853
3854
3855 /*
3856 * Must check guilty signal here since after this point all old
3857 * HW fences are force signaled.
3858 *
3859 * job->base holds a reference to the parent fence.
3860 */
3861 if (job && job->base.s_fence->parent &&
3862 dma_fence_is_signaled(job->base.s_fence->parent))
3863 job_signaled = true;
3864
3865 if (!amdgpu_device_ip_need_full_reset(adev))
3866 device_list_handle = &device_list;
3867
3868 if (job_signaled) {
3869 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
3870 goto skip_hw_reset;
3871 }
3872
3873
3874 /* Guilty job will be freed after this */
3875 r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
3876 if (r) {
3877
3878 DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
3879 r, adev->ddev->unique);
3880 adev->asic_reset_res = r;
3881 }
3882
3883 retry: /* Rest of adevs pre asic reset from XGMI hive. */
3884 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3885
3886 if (tmp_adev == adev)
3887 continue;
3888
3889 amdgpu_device_lock_adev(tmp_adev, false);
3890 r = amdgpu_device_pre_asic_reset(tmp_adev,
3891 NULL,
3892 &need_full_reset);
3893
3894 if (r) {
3895 DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
3896 r, tmp_adev->ddev->unique);
3897 tmp_adev->asic_reset_res = r;
3898 }
3899 }
3900
3901 /* Actual ASIC resets if needed. */
3902 /* TODO Implement XGMI hive reset logic for SRIOV */
3903 if (amdgpu_sriov_vf(adev)) {
3904 r = amdgpu_device_reset_sriov(adev, job ? false : true);
3905 if (r)
3906 adev->asic_reset_res = r;
3907 } else {
3908 r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
3909 if (r == -EAGAIN)
3910 goto retry;
3911 }
3912
3913 skip_hw_reset:
3914
3915 /* Post ASIC reset for all devs. */
3916 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3917 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3918 struct amdgpu_ring *ring = tmp_adev->rings[i];
3919
3920 if (!ring || !ring->sched.thread)
3921 continue;
3922
3923 /* No point in resubmitting jobs if we didn't HW reset */
3924 if (!tmp_adev->asic_reset_res && !job_signaled)
3925 drm_sched_resubmit_jobs(&ring->sched);
3926
3927 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
3928 }
3929
3930 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
3931 drm_helper_resume_force_mode(tmp_adev->ddev);
3932 }
3933
3934 tmp_adev->asic_reset_res = 0;
3935
3936 if (r) {
3937 /* bad news, how to tell it to userspace ? */
3938 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
3939 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
3940 } else {
3941 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
3942 }
3943
3944 amdgpu_device_unlock_adev(tmp_adev);
3945 }
3946
3947 if (hive)
3948 mutex_unlock(&hive->reset_lock);
3949
3950 if (r)
3951 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
3952 return r;
3953 }
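
/*
 * Illustrative sketch (hypothetical names, simplified): the scheduler's
 * job-timeout handler is the usual entry point into the recovery routine
 * above, roughly:
 *
 *   static void my_job_timedout(struct drm_sched_job *s_job)
 *   {
 *           struct amdgpu_job *job = to_amdgpu_job(s_job);
 *           struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
 *
 *           if (amdgpu_device_should_recover_gpu(ring->adev))
 *                   amdgpu_device_gpu_recover(ring->adev, job);
 *   }
 */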
3954
3955
3956 /**
3957 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
3958 *
3959 * @adev: amdgpu_device pointer
3960 *
3961 * Fetches and stores in the driver the PCIE capabilities (gens/speeds/lanes)
3962 * from the PCI express slot.
3963 */
3964 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
3965 {
3966 struct pci_dev *pdev;
3967 enum pci_bus_speed speed_cap, platform_speed_cap;
3968 enum pcie_link_width platform_link_width;
3969
3970 if (amdgpu_pcie_gen_cap)
3971 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
3972
3973 if (amdgpu_pcie_lane_cap)
3974 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
3975
3976 /* covers APUs as well */
3977 if (pci_is_root_bus(adev->pdev->bus)) {
3978 if (adev->pm.pcie_gen_mask == 0)
3979 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3980 if (adev->pm.pcie_mlw_mask == 0)
3981 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3982 return;
3983 }
3984
3985 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
3986 return;
3987
3988 pcie_bandwidth_available(adev->pdev, NULL,
3989 &platform_speed_cap, &platform_link_width);
3990
3991 if (adev->pm.pcie_gen_mask == 0) {
3992 /* asic caps */
3993 pdev = adev->pdev;
3994 speed_cap = pcie_get_speed_cap(pdev);
3995 if (speed_cap == PCI_SPEED_UNKNOWN) {
3996 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3997 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3998 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3999 } else {
4000 if (speed_cap == PCIE_SPEED_16_0GT)
4001 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4002 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4003 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4004 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
4005 else if (speed_cap == PCIE_SPEED_8_0GT)
4006 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4007 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4008 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4009 else if (speed_cap == PCIE_SPEED_5_0GT)
4010 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4011 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
4012 else
4013 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
4014 }
4015 /* platform caps */
4016 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
4017 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4018 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4019 } else {
4020 if (platform_speed_cap == PCIE_SPEED_16_0GT)
4021 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4022 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4023 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4024 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
4025 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
4026 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4027 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4028 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
4029 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
4030 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4031 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4032 else
4033 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
4034
4035 }
4036 }
4037 if (adev->pm.pcie_mlw_mask == 0) {
4038 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
4039 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
4040 } else {
4041 switch (platform_link_width) {
4042 case PCIE_LNK_X32:
4043 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
4044 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4045 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4046 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4047 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4048 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4049 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4050 break;
4051 case PCIE_LNK_X16:
4052 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4053 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4054 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4055 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4056 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4057 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4058 break;
4059 case PCIE_LNK_X12:
4060 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4061 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4062 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4063 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4064 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4065 break;
4066 case PCIE_LNK_X8:
4067 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4068 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4069 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4070 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4071 break;
4072 case PCIE_LNK_X4:
4073 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4074 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4075 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4076 break;
4077 case PCIE_LNK_X2:
4078 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4079 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4080 break;
4081 case PCIE_LNK_X1:
4082 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
4083 break;
4084 default:
4085 break;
4086 }
4087 }
4088 }
4089 }
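
/*
 * Usage note: both masks filled in above can be overridden from user space
 * through the pcie_gen_cap / pcie_lane_cap module parameters checked at the
 * top of this function; the values are CAIL mask encodings, e.g.
 * (hypothetical value):
 *
 *   modprobe amdgpu pcie_gen_cap=0x00010003
 */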
4090