#ifndef __AMDGPU_UMC_H__
#define __AMDGPU_UMC_H__

/*
 * Implement 64-bit register operations via the 32-bit interface;
 * (reg) + 1 is the adjacent 32-bit register, since offsets are in dwords.
 */
#define RREG64_UMC(reg) (RREG32(reg) | \
		((uint64_t)RREG32((reg) + 1) << 32))
#define WREG64_UMC(reg, v) \
	do { \
		WREG32((reg), lower_32_bits(v)); \
		WREG32((reg) + 1, upper_32_bits(v)); \
	} while (0)
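
/*
 * Usage sketch: read/update a 64-bit value that is split across two
 * consecutive 32-bit registers. MC_UMC_ERR_ADDR is a hypothetical
 * dword-indexed register offset, not something defined in this header:
 *
 *	uint64_t err_addr = RREG64_UMC(MC_UMC_ERR_ADDR);
 *	WREG64_UMC(MC_UMC_ERR_ADDR, err_addr & ~0xffULL);
 *
 * Note that the two 32-bit accesses are not atomic as a pair.
 */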

/*
 * Walks every channel of every UMC instance and calls func on each one;
 * expects adev and ras_error_status to be in scope in the enclosing
 * function. func is invoked as:
 *
 * void (*func)(struct amdgpu_device *adev, struct ras_err_data *err_data,
 *		uint32_t umc_reg_offset, uint32_t channel_index)
 */
#define amdgpu_umc_for_each_channel(func) \
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; \
	uint32_t umc_inst, channel_inst, umc_reg_offset, channel_index; \
	for (umc_inst = 0; umc_inst < adev->umc.umc_inst_num; umc_inst++) { \
		/* enable the index mode to query error count per channel */ \
		adev->umc.funcs->enable_umc_index_mode(adev, umc_inst); \
		for (channel_inst = 0; \
			channel_inst < adev->umc.channel_inst_num; \
			channel_inst++) { \
			/* calc the register offset according to channel instance */ \
			umc_reg_offset = adev->umc.channel_offs * channel_inst; \
			/* get channel index of interleaved memory */ \
			channel_index = adev->umc.channel_idx_tbl[ \
				umc_inst * adev->umc.channel_inst_num + channel_inst]; \
			(func)(adev, err_data, umc_reg_offset, channel_index); \
		} \
	} \
	adev->umc.funcs->disable_umc_index_mode(adev);
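
/*
 * Usage sketch: the macro expands in place, so a per-ASIC RAS query hook
 * only has to supply the per-channel callback. umc_v6_1_query_error_count
 * is a hypothetical callback name, not declared in this header:
 *
 * static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
 *					      void *ras_error_status)
 * {
 *	amdgpu_umc_for_each_channel(umc_v6_1_query_error_count);
 * }
 */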

struct amdgpu_umc_funcs {
	void (*ras_init)(struct amdgpu_device *adev);
	void (*query_ras_error_count)(struct amdgpu_device *adev,
					void *ras_error_status);
	void (*query_ras_error_address)(struct amdgpu_device *adev,
					void *ras_error_status);
	void (*enable_umc_index_mode)(struct amdgpu_device *adev,
					uint32_t umc_instance);
	void (*disable_umc_index_mode)(struct amdgpu_device *adev);
};
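
/*
 * Sketch of an ASIC-specific table wiring these hooks together; the
 * umc_v6_1_* names are illustrative assumptions, not declared here:
 *
 * const struct amdgpu_umc_funcs umc_v6_1_funcs = {
 *	.ras_init = umc_v6_1_ras_init,
 *	.query_ras_error_count = umc_v6_1_query_ras_error_count,
 *	.query_ras_error_address = umc_v6_1_query_ras_error_address,
 *	.enable_umc_index_mode = umc_v6_1_enable_umc_index_mode,
 *	.disable_umc_index_mode = umc_v6_1_disable_umc_index_mode,
 * };
 */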

struct amdgpu_umc {
	/* max error count in one ras query call */
	uint32_t max_ras_err_cnt_per_query;
	/* number of umc channel instances with memory map register access */
	uint32_t channel_inst_num;
	/* number of umc instances with memory map register access */
	uint32_t umc_inst_num;
	/* UMC register offset per channel */
	uint32_t channel_offs;
	/* channel index table of interleaved memory */
	const uint32_t *channel_idx_tbl;

	const struct amdgpu_umc_funcs *funcs;
};
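
/*
 * Sketch of how an ASIC-specific init path might populate this struct;
 * the UMC_V6_1_* constants and umc_v6_1_* symbols are illustrative
 * assumptions, not declared in this header:
 *
 *	adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
 *	adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
 *	adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
 *	adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET;
 *	adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
 *	adev->umc.funcs = &umc_v6_1_funcs;
 */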

#endif