This source file includes the following definitions:
- nbio_v7_4_remap_hdp_registers
- nbio_v7_4_get_rev_id
- nbio_v7_4_mc_access_enable
- nbio_v7_4_hdp_flush
- nbio_v7_4_get_memsize
- nbio_v7_4_sdma_doorbell_range
- nbio_v7_4_vcn_doorbell_range
- nbio_v7_4_enable_doorbell_aperture
- nbio_v7_4_enable_doorbell_selfring_aperture
- nbio_v7_4_ih_doorbell_range
- nbio_v7_4_update_medium_grain_clock_gating
- nbio_v7_4_update_medium_grain_light_sleep
- nbio_v7_4_get_clockgating_state
- nbio_v7_4_ih_control
- nbio_v7_4_get_hdp_flush_req_offset
- nbio_v7_4_get_hdp_flush_done_offset
- nbio_v7_4_get_pcie_index_offset
- nbio_v7_4_get_pcie_data_offset
- nbio_v7_4_detect_hw_virt
- nbio_v7_4_init_registers
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_4.h"

#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "nbio/nbio_7_4_0_smn.h"
#include <uapi/linux/kfd_ioctl.h>

#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c

/*
 * The registers and masks below are not provided by the nbio 7.4 headers
 * included above, so they are defined locally for this file.
 */
#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L

#define mmBIF_MMSCH1_DOORBELL_RANGE 0x01dc
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX 2

#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET__SHIFT 0x2
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE__SHIFT 0x10
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L

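/* Point the HDP memory/register flush trigger registers at offsets inside the
 * remapped MMIO page (adev->rmmio_remap), using the KFD MMIO remap offsets
 * from kfd_ioctl.h.
 */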
static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev)
{
        WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
                adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
        WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
                adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}

static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
        u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

        tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
        tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

        return tmp;
}

static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
        if (enable)
                WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
                        BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
        else
                WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

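/* Flush the HDP cache through the remapped flush register: write it directly
 * via MMIO when no ring is given, otherwise emit the register write on the ring.
 */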
static void nbio_v7_4_hdp_flush(struct amdgpu_device *adev,
                                struct amdgpu_ring *ring)
{
        if (!ring || !ring->funcs->emit_wreg)
                WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
        else
                amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}

static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
{
        return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}

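/* Program the doorbell range (offset and size) for one SDMA instance. */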
static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
                                          bool use_doorbell, int doorbell_index, int doorbell_size)
{
        u32 reg, doorbell_range;

        if (instance < 2)
                reg = instance +
                        SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);
        else
                /*
                 * The doorbell range registers for SDMA2 and later are not
                 * contiguous with those for SDMA0/1, hence the extra 0x4
                 * register offset.
                 */
                reg = instance + 0x4 +
                        SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);

        doorbell_range = RREG32(reg);

        if (use_doorbell) {
                doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
                doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
        } else
                doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);

        WREG32(reg, doorbell_range);
}

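/* Program the doorbell range for a VCN/MMSCH instance (instance 0 or 1). */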
static void nbio_v7_4_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
                                         int doorbell_index, int instance)
{
        u32 reg;
        u32 doorbell_range;

        if (instance)
                reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE);
        else
                reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);

        doorbell_range = RREG32(reg);

        if (use_doorbell) {
                doorbell_range = REG_SET_FIELD(doorbell_range,
                                               BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
                                               doorbell_index);
                doorbell_range = REG_SET_FIELD(doorbell_range,
                                               BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
        } else
                doorbell_range = REG_SET_FIELD(doorbell_range,
                                               BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);

        WREG32(reg, doorbell_range);
}

static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
                                               bool enable)
{
        WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}

static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
                                                         bool enable)
{
        u32 tmp = 0;

        if (enable) {
                tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
                      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
                      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);

                WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
                             lower_32_bits(adev->doorbell.base));
                WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
                             upper_32_bits(adev->doorbell.base));
        }

        WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}

static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
                                        bool use_doorbell, int doorbell_index)
{
        u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

        if (use_doorbell) {
                ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
                ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
        } else
                ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);

        WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}

static void nbio_v7_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                        bool enable)
{
        /* Medium grain clock gating is not programmed here for NBIO 7.4. */
}

static void nbio_v7_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                                                       bool enable)
{
        uint32_t def, data;

        def = data = RREG32_PCIE(smnPCIE_CNTL2);
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
                data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
                         PCIE_CNTL2__MST_MEM_LS_EN_MASK |
                         PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
        } else {
                data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
                          PCIE_CNTL2__MST_MEM_LS_EN_MASK |
                          PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
        }

        if (def != data)
                WREG32_PCIE(smnPCIE_CNTL2, data);
}

static void nbio_v7_4_get_clockgating_state(struct amdgpu_device *adev,
                                            u32 *flags)
{
        int data;

        /* AMD_CG_SUPPORT_BIF_MGCG */
        data = RREG32_PCIE(smnCPM_CONTROL);
        if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
                *flags |= AMD_CG_SUPPORT_BIF_MGCG;

        /* AMD_CG_SUPPORT_BIF_LS */
        data = RREG32_PCIE(smnPCIE_CNTL2);
        if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
                *flags |= AMD_CG_SUPPORT_BIF_LS;
}

static void nbio_v7_4_ih_control(struct amdgpu_device *adev)
{
        u32 interrupt_cntl;

        /* setup interrupt control */
        WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
        interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
        /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi */
        interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
        /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
        interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
        WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

static u32 nbio_v7_4_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
        return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
}

static u32 nbio_v7_4_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
        return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
}

static u32 nbio_v7_4_get_pcie_index_offset(struct amdgpu_device *adev)
{
        return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
{
        return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}

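/* HDP flush reference masks; the reserved engine bits serve as the flush-done
 * flags for SDMA instances 2 through 7.
 */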
static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
        .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
        .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
        .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
        .ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
        .ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
        .ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
        .ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
        .ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
        .ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
        .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
        .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
        .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
        .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK,
        .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
        .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
        .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
        .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
        .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
};

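/* Read RCC_IOV_FUNC_IDENTIFIER to determine whether we run as an SR-IOV VF
 * (bit 0), as a function with IOV enabled (bit 31), or, if the register reads
 * zero inside a virtual machine, in passthrough mode.
 */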
static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
{
        uint32_t reg;

        reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER);
        if (reg & 1)
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

        if (reg & 0x80000000)
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

        if (!reg) {
                if (is_virtual_machine())
                        adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
        }
}

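/* One-time register setup: disable slave ordering on the PCIe CI block. */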
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
        uint32_t def, data;

        def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
        data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);

        if (def != data)
                WREG32_PCIE(smnPCIE_CI_CNTL, data);
}

const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
        .hdp_flush_reg = &nbio_v7_4_hdp_flush_reg,
        .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
        .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
        .get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
        .get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset,
        .get_rev_id = nbio_v7_4_get_rev_id,
        .mc_access_enable = nbio_v7_4_mc_access_enable,
        .hdp_flush = nbio_v7_4_hdp_flush,
        .get_memsize = nbio_v7_4_get_memsize,
        .sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
        .vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range,
        .enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
        .enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
        .ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
        .update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
        .update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
        .get_clockgating_state = nbio_v7_4_get_clockgating_state,
        .ih_control = nbio_v7_4_ih_control,
        .init_registers = nbio_v7_4_init_registers,
        .detect_hw_virt = nbio_v7_4_detect_hw_virt,
        .remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
};