This source file includes the following definitions.
- si_ih_enable_interrupts
- si_ih_disable_interrupts
- si_ih_irq_init
- si_ih_irq_disable
- si_ih_get_wptr
- si_ih_decode_iv
- si_ih_set_rptr
- si_ih_early_init
- si_ih_sw_init
- si_ih_sw_fini
- si_ih_hw_init
- si_ih_hw_fini
- si_ih_suspend
- si_ih_resume
- si_ih_is_idle
- si_ih_wait_for_idle
- si_ih_soft_reset
- si_ih_set_clockgating_state
- si_ih_set_powergating_state
- si_ih_set_interrupt_funcs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24 #include <linux/pci.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_ih.h"
28 #include "sid.h"
29 #include "si_ih.h"
30
31 static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);
32
/*
 * Enable IH interrupt delivery: set the master interrupt enable bit and
 * turn on the IH ring buffer, then mirror the enabled state in the
 * driver's IH bookkeeping.
 */
static void si_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}
44
/*
 * Disable IH interrupt delivery: stop the ring buffer and clear the
 * master enable, then zero both hardware ring pointers and the driver's
 * cached rptr so the ring starts clean on the next enable.
 */
static void si_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* reset the hardware read/write pointers and the driver mirror */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}
59
60 static int si_ih_irq_init(struct amdgpu_device *adev)
61 {
62 struct amdgpu_ih_ring *ih = &adev->irq.ih;
63 int rb_bufsz;
64 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
65
66 si_ih_disable_interrupts(adev);
67
68 WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
69 interrupt_cntl = RREG32(INTERRUPT_CNTL);
70 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
71 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
72 WREG32(INTERRUPT_CNTL, interrupt_cntl);
73
74 WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
75 rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
76
77 ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
78 IH_WPTR_OVERFLOW_CLEAR |
79 (rb_bufsz << 1) |
80 IH_WPTR_WRITEBACK_ENABLE;
81
82 WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
83 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
84 WREG32(IH_RB_CNTL, ih_rb_cntl);
85 WREG32(IH_RB_RPTR, 0);
86 WREG32(IH_RB_WPTR, 0);
87
88 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
89 if (adev->irq.msi_enabled)
90 ih_cntl |= RPTR_REARM;
91 WREG32(IH_CNTL, ih_cntl);
92
93 pci_set_master(adev->pdev);
94 si_ih_enable_interrupts(adev);
95
96 return 0;
97 }
98
/*
 * Disable the IH and wait briefly so any in-flight register writes and
 * interrupts can settle before the caller continues teardown.
 */
static void si_ih_irq_disable(struct amdgpu_device *adev)
{
	si_ih_disable_interrupts(adev);
	/* short settle delay after disabling */
	mdelay(1);
}
104
/**
 * si_ih_get_wptr - fetch the current IH ring write pointer
 * @adev: amdgpu device pointer
 * @ih: IH ring
 *
 * Reads the writeback copy of the hardware wptr.  If the overflow bit
 * is set, the hardware has wrapped past the driver's rptr: warn, drop
 * the stale entries by advancing rptr just past the current wptr
 * (+16 bytes, i.e. one ring entry), and clear the overflow flag via
 * IH_RB_CNTL.
 *
 * Returns the wptr masked to the ring size.
 */
static u32 si_ih_get_wptr(struct amdgpu_device *adev,
			  struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		/* resume processing from the oldest non-overwritten entry */
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
		ih->rptr = (wptr + 16) & ih->ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & ih->ptr_mask);
}
123
124 static void si_ih_decode_iv(struct amdgpu_device *adev,
125 struct amdgpu_ih_ring *ih,
126 struct amdgpu_iv_entry *entry)
127 {
128 u32 ring_index = ih->rptr >> 2;
129 uint32_t dw[4];
130
131 dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
132 dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
133 dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
134 dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
135
136 entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
137 entry->src_id = dw[0] & 0xff;
138 entry->src_data[0] = dw[1] & 0xfffffff;
139 entry->ring_id = dw[2] & 0xff;
140 entry->vmid = (dw[2] >> 8) & 0xff;
141
142 ih->rptr += 16;
143 }
144
/*
 * Write the software read pointer back to the hardware so the IH knows
 * which ring entries have been consumed.
 */
static void si_ih_set_rptr(struct amdgpu_device *adev,
			   struct amdgpu_ih_ring *ih)
{
	WREG32(IH_RB_RPTR, ih->rptr);
}
150
/* Install the SI IH callbacks before other IP blocks use the IRQ code. */
static int si_ih_early_init(void *handle)
{
	si_ih_set_interrupt_funcs((struct amdgpu_device *)handle);

	return 0;
}
159
160 static int si_ih_sw_init(void *handle)
161 {
162 int r;
163 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
164
165 r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
166 if (r)
167 return r;
168
169 return amdgpu_irq_init(adev);
170 }
171
172 static int si_ih_sw_fini(void *handle)
173 {
174 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
175
176 amdgpu_irq_fini(adev);
177 amdgpu_ih_ring_fini(adev, &adev->irq.ih);
178
179 return 0;
180 }
181
/* Hardware init hook: program and start the IH ring. */
static int si_ih_hw_init(void *handle)
{
	return si_ih_irq_init((struct amdgpu_device *)handle);
}
188
/* Hardware teardown hook: stop IH interrupt delivery. */
static int si_ih_hw_fini(void *handle)
{
	si_ih_irq_disable((struct amdgpu_device *)handle);

	return 0;
}
197
/* Suspend is just a hardware teardown of the IH block. */
static int si_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;

	return si_ih_hw_fini(adev);
}
204
/* Resume is just a hardware re-init of the IH block. */
static int si_ih_resume(void *handle)
{
	struct amdgpu_device *adev = handle;

	return si_ih_hw_init(adev);
}
211
212 static bool si_ih_is_idle(void *handle)
213 {
214 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
215 u32 tmp = RREG32(SRBM_STATUS);
216
217 if (tmp & SRBM_STATUS__IH_BUSY_MASK)
218 return false;
219
220 return true;
221 }
222
223 static int si_ih_wait_for_idle(void *handle)
224 {
225 unsigned i;
226 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
227
228 for (i = 0; i < adev->usec_timeout; i++) {
229 if (si_ih_is_idle(handle))
230 return 0;
231 udelay(1);
232 }
233 return -ETIMEDOUT;
234 }
235
/**
 * si_ih_soft_reset - soft-reset the IH block through SRBM if it is busy
 * @handle: amdgpu_device pointer (as void *)
 *
 * If SRBM_STATUS reports the IH busy, pulse the IH bit in
 * SRBM_SOFT_RESET: assert it, wait ~50 us, deassert it, and wait again.
 * The register is read back after each write (presumably to post the
 * write before the delay).
 *
 * Returns 0 (this hook never fails).
 */
static int si_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		/* deassert the reset bit */
		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);
	}

	return 0;
}
264
/* No clockgating configuration is done for the SI IH block. */
static int si_ih_set_clockgating_state(void *handle,
				       enum amd_clockgating_state state)
{
	return 0;
}
270
/* No powergating configuration is done for the SI IH block. */
static int si_ih_set_powergating_state(void *handle,
				       enum amd_powergating_state state)
{
	return 0;
}
276
/* IP-level lifecycle callbacks for the SI IH block (no late_init). */
static const struct amd_ip_funcs si_ih_ip_funcs = {
	.name = "si_ih",
	.early_init = si_ih_early_init,
	.late_init = NULL,
	.sw_init = si_ih_sw_init,
	.sw_fini = si_ih_sw_fini,
	.hw_init = si_ih_hw_init,
	.hw_fini = si_ih_hw_fini,
	.suspend = si_ih_suspend,
	.resume = si_ih_resume,
	.is_idle = si_ih_is_idle,
	.wait_for_idle = si_ih_wait_for_idle,
	.soft_reset = si_ih_soft_reset,
	.set_clockgating_state = si_ih_set_clockgating_state,
	.set_powergating_state = si_ih_set_powergating_state,
};
293
/* Ring-level IH callbacks used by the common amdgpu IH code. */
static const struct amdgpu_ih_funcs si_ih_funcs = {
	.get_wptr = si_ih_get_wptr,
	.decode_iv = si_ih_decode_iv,
	.set_rptr = si_ih_set_rptr
};
299
/* Point the device's IH dispatch table at the SI implementations. */
static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &si_ih_funcs;
}
304
305 const struct amdgpu_ip_block_version si_ih_ip_block =
306 {
307 .type = AMD_IP_BLOCK_TYPE_IH,
308 .major = 1,
309 .minor = 0,
310 .rev = 0,
311 .funcs = &si_ih_ip_funcs,
312 };