This source file includes the following definitions:
- radeon_driver_irq_handler_kms
- radeon_hotplug_work_func
- radeon_dp_work_func
- radeon_driver_irq_preinstall_kms
- radeon_driver_irq_postinstall_kms
- radeon_driver_irq_uninstall_kms
- radeon_msi_ok
- radeon_irq_kms_init
- radeon_irq_kms_fini
- radeon_irq_kms_sw_irq_get
- radeon_irq_kms_sw_irq_get_delayed
- radeon_irq_kms_sw_irq_put
- radeon_irq_kms_pflip_irq_get
- radeon_irq_kms_pflip_irq_put
- radeon_irq_kms_enable_afmt
- radeon_irq_kms_disable_afmt
- radeon_irq_kms_enable_hpd
- radeon_irq_kms_disable_hpd
- radeon_irq_kms_set_irq_n_enabled
#include <linux/pm_runtime.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_irq.h>
#include <drm/drm_pci.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>

#include "atom.h"
#include "radeon.h"
#include "radeon_reg.h"

#define RADEON_WAIT_IDLE_TIMEOUT 200
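/**
 * radeon_driver_irq_handler_kms - irq handler for KMS driver
 *
 * @irq: irq number
 * @arg: drm dev pointer
 *
 * This is the irq handler for the radeon KMS driver (all asics).
 * radeon_irq_process is a macro that points to the per-asic
 * irq handler callback.
 */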
irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct radeon_device *rdev = dev->dev_private;
	irqreturn_t ret;

	ret = radeon_irq_process(rdev);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);
	return ret;
}
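/**
 * radeon_hotplug_work_func - display hotplug work handler
 *
 * @work: work struct
 *
 * This is the hotplug event work handler (all asics). The work
 * gets scheduled from the irq handler if there was a hotplug
 * interrupt. It walks the connector table and calls the hotplug
 * handler for each one, then sends a drm hotplug event to alert
 * userspace.
 */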
static void radeon_hotplug_work_func(struct work_struct *work)
{
	struct radeon_device *rdev = container_of(work, struct radeon_device,
						  hotplug_work.work);
	struct drm_device *dev = rdev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	/* we can race here at startup, some boards seem to trigger
	 * hotplug irqs when they shouldn't. */
	if (!rdev->mode_info.mode_config_initialized)
		return;

	mutex_lock(&mode_config->mutex);
	list_for_each_entry(connector, &mode_config->connector_list, head)
		radeon_connector_hotplug(connector);
	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
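/**
 * radeon_dp_work_func - DisplayPort hotplug work handler
 *
 * @work: work struct
 *
 * Walks the connector table and calls the connector hotplug
 * handler for each connector; scheduled from the irq handler
 * when a DisplayPort hotplug event is detected.
 */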
static void radeon_dp_work_func(struct work_struct *work)
{
	struct radeon_device *rdev = container_of(work, struct radeon_device,
						  dp_work);
	struct drm_device *dev = rdev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	/* this should take a mutex */
	list_for_each_entry(connector, &mode_config->connector_list, head)
		radeon_connector_hotplug(connector);
}
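/**
 * radeon_driver_irq_preinstall_kms - drm irq preinstall callback
 *
 * @dev: drm dev pointer
 *
 * Gets the hw ready to enable irqs (all asics).
 * This function disables all interrupt sources on the GPU.
 */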
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	unsigned i;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	/* Disable *all* interrupts */
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		atomic_set(&rdev->irq.ring_int[i], 0);
	rdev->irq.dpm_thermal = false;
	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
		rdev->irq.hpd[i] = false;
	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
		rdev->irq.crtc_vblank_int[i] = false;
		atomic_set(&rdev->irq.pflip[i], 0);
		rdev->irq.afmt[i] = false;
	}
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	/* Clear bits */
	radeon_irq_process(rdev);
}
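/**
 * radeon_driver_irq_postinstall_kms - drm irq postinstall callback
 *
 * @dev: drm dev pointer
 *
 * Handles stuff to be done after enabling irqs (all asics).
 * Returns 0 on success.
 */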
int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	/* AVIVO asics have a 24-bit hw frame counter, older asics 21-bit */
	if (ASIC_IS_AVIVO(rdev))
		dev->max_vblank_count = 0x00ffffff;
	else
		dev->max_vblank_count = 0x001fffff;

	return 0;
}
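/**
 * radeon_driver_irq_uninstall_kms - drm irq uninstall callback
 *
 * @dev: drm dev pointer
 *
 * This function disables all interrupt sources on the GPU (all asics).
 */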
void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	unsigned i;

	if (rdev == NULL)
		return;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	/* Disable *all* interrupts */
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		atomic_set(&rdev->irq.ring_int[i], 0);
	rdev->irq.dpm_thermal = false;
	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
		rdev->irq.hpd[i] = false;
	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
		rdev->irq.crtc_vblank_int[i] = false;
		atomic_set(&rdev->irq.pflip[i], 0);
		rdev->irq.afmt[i] = false;
	}
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
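/**
 * radeon_msi_ok - asic specific msi checks
 *
 * @rdev: radeon device pointer
 *
 * Handles asic specific MSI checks to determine if MSIs
 * should be enabled on a particular chip (all asics).
 * Returns true if MSIs should be enabled, false if MSIs
 * should not be enabled.
 */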
static bool radeon_msi_ok(struct radeon_device *rdev)
{
	/* RV370/RV380 was first asic with MSI support */
	if (rdev->family < CHIP_RV380)
		return false;

	/* MSIs don't work on AGP */
	if (rdev->flags & RADEON_IS_AGP)
		return false;

	/*
	 * Older chips have a HW limitation, they can only generate 40 bits
	 * of address for "64-bit" MSIs which breaks on some platforms, notably
	 * IBM POWER servers, so we limit them
	 */
	if (rdev->family < CHIP_BONAIRE) {
		dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
		rdev->pdev->no_64bit_msi = 1;
	}

	/* force MSI on */
	if (radeon_msi == 1)
		return true;
	else if (radeon_msi == 0)
		return false;

	/* Quirks */
	/* HP RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x103c) &&
	    (rdev->pdev->subsystem_device == 0x30c2))
		return true;

	/* Dell RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x1028) &&
	    (rdev->pdev->subsystem_device == 0x01fc))
		return true;

	/* Dell RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x1028) &&
	    (rdev->pdev->subsystem_device == 0x01fd))
		return true;

	/* Gateway RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x107b) &&
	    (rdev->pdev->subsystem_device == 0x0185))
		return true;

	/* try and enable MSIs by default on all RS690s */
	if (rdev->family == CHIP_RS690)
		return true;

	/* RV515 seems to have MSI issues where it loses
	 * MSI rearms occasionally. This leads to lockups and hangs.
	 * disable it by default.
	 */
	if (rdev->family == CHIP_RV515)
		return false;
	if (rdev->flags & RADEON_IS_IGP) {
		/* APUs work fine with MSIs */
		if (rdev->family >= CHIP_PALM)
			return true;
		/* lots of IGPs have problems with MSIs */
		return false;
	}

	return true;
}
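/**
 * radeon_irq_kms_init - init driver interrupt info
 *
 * @rdev: radeon device pointer
 *
 * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
 * Returns 0 for success, error for failure.
 */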
int radeon_irq_kms_init(struct radeon_device *rdev)
{
	int r = 0;

	spin_lock_init(&rdev->irq.lock);

	/* Disable vblank irqs aggressively for power-saving */
	rdev->ddev->vblank_disable_immediate = true;

	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
	if (r)
		return r;

	/* enable msi */
	rdev->msi_enabled = 0;

	if (radeon_msi_ok(rdev)) {
		int ret = pci_enable_msi(rdev->pdev);
		if (!ret) {
			rdev->msi_enabled = 1;
			dev_info(rdev->dev, "radeon: using MSI.\n");
		}
	}

	INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
	INIT_WORK(&rdev->dp_work, radeon_dp_work_func);
	INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);

	rdev->irq.installed = true;
	r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
	if (r) {
		rdev->irq.installed = false;
		flush_delayed_work(&rdev->hotplug_work);
		return r;
	}

	DRM_INFO("radeon: irq initialized.\n");
	return 0;
}
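/**
 * radeon_irq_kms_fini - tear down driver interrupt info
 *
 * @rdev: radeon device pointer
 *
 * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
 */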
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
	if (rdev->irq.installed) {
		drm_irq_uninstall(rdev->ddev);
		rdev->irq.installed = false;
		if (rdev->msi_enabled)
			pci_disable_msi(rdev->pdev);
		flush_delayed_work(&rdev->hotplug_work);
	}
}
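/**
 * radeon_irq_kms_sw_irq_get - enable software interrupt
 *
 * @rdev: radeon device pointer
 * @ring: ring whose interrupt you want to enable
 *
 * Enables the software interrupt for a specific ring (all asics).
 * The software interrupt is generally used to signal a fence on
 * a particular ring.
 *
 * Illustrative pairing with radeon_irq_kms_sw_irq_put():
 *
 *	radeon_irq_kms_sw_irq_get(rdev, ring);
 *	(wait for the fence on this ring to signal)
 *	radeon_irq_kms_sw_irq_put(rdev, ring);
 */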
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
{
	unsigned long irqflags;

	if (!rdev->ddev->irq_enabled)
		return;

	if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}
}
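/**
 * radeon_irq_kms_sw_irq_get_delayed - enable software interrupt
 *
 * @rdev: radeon device pointer
 * @ring: ring whose interrupt you want to enable
 *
 * Takes a software interrupt reference for a specific ring without
 * touching the hardware (all asics). Returns true if this was the
 * first reference; unlike radeon_irq_kms_sw_irq_get(), programming
 * the interrupt state via radeon_irq_set() is left to the caller,
 * hence "delayed".
 */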
bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring)
{
	return atomic_inc_return(&rdev->irq.ring_int[ring]) == 1;
}
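/**
 * radeon_irq_kms_sw_irq_put - disable software interrupt
 *
 * @rdev: radeon device pointer
 * @ring: ring whose interrupt you want to disable
 *
 * Disables the software interrupt for a specific ring (all asics).
 * The software interrupt is generally used to signal a fence on
 * a particular ring.
 */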
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
{
	unsigned long irqflags;

	if (!rdev->ddev->irq_enabled)
		return;

	if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}
}
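/**
 * radeon_irq_kms_pflip_irq_get - enable pageflip interrupt
 *
 * @rdev: radeon device pointer
 * @crtc: crtc whose interrupt you want to enable
 *
 * Enables the pageflip interrupt for a specific crtc (all asics).
 * For pageflips we use the vblank interrupt source.
 */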
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
{
	unsigned long irqflags;

	if (crtc < 0 || crtc >= rdev->num_crtc)
		return;

	if (!rdev->ddev->irq_enabled)
		return;

	if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}
}
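/**
 * radeon_irq_kms_pflip_irq_put - disable pageflip interrupt
 *
 * @rdev: radeon device pointer
 * @crtc: crtc whose interrupt you want to disable
 *
 * Disables the pageflip interrupt for a specific crtc (all asics).
 * For pageflips we use the vblank interrupt source.
 */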
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
{
	unsigned long irqflags;

	if (crtc < 0 || crtc >= rdev->num_crtc)
		return;

	if (!rdev->ddev->irq_enabled)
		return;

	if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}
}
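/**
 * radeon_irq_kms_enable_afmt - enable audio format change interrupt
 *
 * @rdev: radeon device pointer
 * @block: afmt block whose interrupt you want to enable
 *
 * Enables the afmt change interrupt for a specific afmt block (all asics).
 */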
void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
{
	unsigned long irqflags;

	if (!rdev->ddev->irq_enabled)
		return;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.afmt[block] = true;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
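/**
 * radeon_irq_kms_disable_afmt - disable audio format change interrupt
 *
 * @rdev: radeon device pointer
 * @block: afmt block whose interrupt you want to disable
 *
 * Disables the afmt change interrupt for a specific afmt block (all asics).
 */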
void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
{
	unsigned long irqflags;

	if (!rdev->ddev->irq_enabled)
		return;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.afmt[block] = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
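/**
 * radeon_irq_kms_enable_hpd - enable hotplug detect interrupt
 *
 * @rdev: radeon device pointer
 * @hpd_mask: mask of hpd pins you want to enable
 *
 * Enables the hotplug detect interrupt for a specific hpd pin (all asics).
 */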
void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
	unsigned long irqflags;
	int i;

	if (!rdev->ddev->irq_enabled)
		return;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
		rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
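/**
 * radeon_irq_kms_disable_hpd - disable hotplug detect interrupt
 *
 * @rdev: radeon device pointer
 * @hpd_mask: mask of hpd pins you want to disable
 *
 * Disables the hotplug detect interrupt for a specific hpd pin (all asics).
 */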
void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
	unsigned long irqflags;
	int i;

	if (!rdev->ddev->irq_enabled)
		return;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	/* hpd[i] is bool: logical NOT clears the flag when the mask bit is set */
	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
		rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
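/**
 * radeon_irq_kms_set_irq_n_enabled - helper for updating interrupt enable registers
 *
 * @rdev: radeon device pointer
 * @reg: the register to write to enable/disable interrupts
 * @mask: the mask that enables the interrupts
 * @enable: whether to enable or disable the interrupt register
 * @name: the name of the interrupt register to print to the kernel log
 * @n: the number of the interrupt register to print to the kernel log
 *
 * Helper for updating the enable state of interrupt registers. Checks whether
 * or not the interrupt matches the enable state we want. If it doesn't, then
 * we update it and print a debugging message to the kernel log indicating the
 * new state of the register.
 */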
void radeon_irq_kms_set_irq_n_enabled(struct radeon_device *rdev,
				      u32 reg, u32 mask,
				      bool enable, const char *name, unsigned n)
{
	u32 tmp = RREG32(reg);

	/* nothing to do when the state already matches what we want */
	if (!!(tmp & mask) == enable)
		return;

	if (enable) {
		DRM_DEBUG("%s%d interrupts enabled\n", name, n);
		WREG32(reg, tmp | mask);
	} else {
		DRM_DEBUG("%s%d interrupts disabled\n", name, n);
		WREG32(reg, tmp & ~mask);
	}
}