This source file includes the following definitions.
- setup_forced_irqthreads
- __synchronize_hardirq
- synchronize_hardirq
- synchronize_irq
- __irq_can_set_affinity
- irq_can_set_affinity
- irq_can_set_affinity_usr
- irq_set_thread_affinity
- irq_validate_effective_affinity
- irq_do_set_affinity
- irq_set_affinity_pending
- irq_set_affinity_pending
- irq_try_set_affinity
- irq_set_affinity_locked
- __irq_set_affinity
- irq_set_affinity_hint
- irq_affinity_notify
- irq_set_affinity_notifier
- irq_setup_affinity
- irq_setup_affinity
- irq_set_vcpu_affinity
- __disable_irq
- __disable_irq_nosync
- disable_irq_nosync
- disable_irq
- disable_hardirq
- disable_nmi_nosync
- __enable_irq
- enable_irq
- enable_nmi
- set_irq_wake_real
- irq_set_irq_wake
- can_request_irq
- __irq_set_trigger
- irq_set_parent
- irq_default_primary_handler
- irq_nested_primary_handler
- irq_forced_secondary_handler
- irq_wait_for_interrupt
- irq_finalize_oneshot
- irq_thread_check_affinity
- irq_thread_check_affinity
- irq_forced_thread_fn
- irq_thread_fn
- wake_threads_waitq
- irq_thread_dtor
- irq_wake_secondary
- irq_thread
- irq_wake_thread
- irq_setup_forced_threading
- irq_request_resources
- irq_release_resources
- irq_supports_nmi
- irq_nmi_setup
- irq_nmi_teardown
- setup_irq_thread
- __setup_irq
- setup_irq
- __free_irq
- remove_irq
- free_irq
- __cleanup_nmi
- free_nmi
- request_threaded_irq
- request_any_context_irq
- request_nmi
- enable_percpu_irq
- enable_percpu_nmi
- irq_percpu_is_enabled
- disable_percpu_irq
- disable_percpu_nmi
- __free_percpu_irq
- remove_percpu_irq
- free_percpu_irq
- free_percpu_nmi
- setup_percpu_irq
- __request_percpu_irq
- request_percpu_nmi
- prepare_percpu_nmi
- teardown_percpu_nmi
- __irq_get_irqchip_state
- irq_get_irqchip_state
- irq_set_irqchip_state
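
Most drivers reach this code through the request/free entry points near the end of the list above. As a quick orientation, here is a minimal sketch of the threaded-interrupt life cycle that __setup_irq() and __free_irq() implement; all "foo" names are hypothetical and not part of this file:

#include <linux/interrupt.h>

/* Hard interrupt context: keep this short and defer real work to the thread. */
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/* Runs in the "irq/<nr>-foo" kernel thread created by setup_irq_thread(); may sleep. */
static irqreturn_t foo_thread(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int foo_attach(unsigned int irq, void *cookie)
{
	/* IRQF_ONESHOT keeps the line masked until foo_thread() returns. */
	return request_threaded_irq(irq, foo_hardirq, foo_thread,
				    IRQF_ONESHOT, "foo", cookie);
}

static void foo_detach(unsigned int irq, void *cookie)
{
	free_irq(irq, cookie);	/* must pass the same dev_id as at request time */
}
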
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4  * Copyright (C) 2005-2006 Thomas Gleixner
5  *
6  * This file contains driver APIs to the irq subsystem.
7  */
8
9 #define pr_fmt(fmt) "genirq: " fmt
10
11 #include <linux/irq.h>
12 #include <linux/kthread.h>
13 #include <linux/module.h>
14 #include <linux/random.h>
15 #include <linux/interrupt.h>
16 #include <linux/irqdomain.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/sched/rt.h>
20 #include <linux/sched/task.h>
21 #include <uapi/linux/sched/types.h>
22 #include <linux/task_work.h>
23
24 #include "internals.h"
25
26 #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
27 __read_mostly bool force_irqthreads;
28 EXPORT_SYMBOL_GPL(force_irqthreads);
29
30 static int __init setup_forced_irqthreads(char *arg)
31 {
32 force_irqthreads = true;
33 return 0;
34 }
35 early_param("threadirqs", setup_forced_irqthreads);
36 #endif
37
38 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
39 {
40 struct irq_data *irqd = irq_desc_get_irq_data(desc);
41 bool inprogress;
42
43 do {
44 unsigned long flags;
45
46 /*
47  * Wait until we're out of the critical section.  This might
48  * give the wrong answer due to the lack of memory barriers.
49  */
50 while (irqd_irq_inprogress(&desc->irq_data))
51 cpu_relax();
52
53 /* Ok, that indicated we're done if documented correctly */
54 raw_spin_lock_irqsave(&desc->lock, flags);
55 inprogress = irqd_irq_inprogress(&desc->irq_data);
56
57 /*
58  * If requested and supported, check at the chip whether it
59  * is in flight at the hardware level, i.e. already pending
60  * in a CPU and waiting for service and acknowledge.
61  */
62 if (!inprogress && sync_chip) {
63 /*
64  * Ignore the return code. inprogress is only checked
65  * when the chip supports it.
66  */
67 __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
68 &inprogress);
69 }
70 raw_spin_unlock_irqrestore(&desc->lock, flags);
71
72 /* Oops, that failed? */
73 } while (inprogress);
74 }
75
76 /**
77  *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
78  *	@irq: interrupt number to wait for
79  *
80  *	This function waits for any pending hard IRQ handlers for this
81  *	interrupt to complete before returning. If you use this
82  *	function while holding a resource the hard IRQ handler may need
83  *	you will deadlock. It does not take associated threaded handlers
84  *	into account.
85  *
86  *	Do not use this for shutdown scenarios where you must be sure
87  *	that all parts (hardirq and threaded handler) have completed.
88  *
89  *	Returns: false if a threaded handler is active.
90  *
91  *	This function may be called - with care - from IRQ context.
92  *
93  *	It does not check whether there is an interrupt in flight at the
94  *	hardware level, but not serviced yet, as this might deadlock when
95  *	called with interrupts disabled and the target CPU of the interrupt
96  *	is the current CPU.
97  */
98 bool synchronize_hardirq(unsigned int irq)
99 {
100 struct irq_desc *desc = irq_to_desc(irq);
101
102 if (desc) {
103 __synchronize_hardirq(desc, false);
104 return !atomic_read(&desc->threads_active);
105 }
106
107 return true;
108 }
109 EXPORT_SYMBOL(synchronize_hardirq);
110
111 /**
112  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
113  *	@irq: interrupt number to wait for
114  *
115  *	This function waits for any pending IRQ handlers for this interrupt
116  *	to complete before returning. If you use this function while
117  *	holding a resource the IRQ handler may need you will deadlock.
118  *
119  *	Can only be called from preemptible code as it might sleep when
120  *	an interrupt thread is associated to @irq.
121  *
122  *	It optionally makes sure (when the irq chip supports that method)
123  *	that the interrupt is not pending in any CPU and waiting for
124  *	service.
125  */
126 void synchronize_irq(unsigned int irq)
127 {
128 struct irq_desc *desc = irq_to_desc(irq);
129
130 if (desc) {
131 __synchronize_hardirq(desc, true);
132 /*
133  * We made sure that no hardirq handler is
134  * running. Now verify that no threaded handlers are
135  * active.
136  */
137 wait_event(desc->wait_for_threads,
138 !atomic_read(&desc->threads_active));
139 }
140 }
141 EXPORT_SYMBOL(synchronize_irq);
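
As a hedged illustration of how synchronize_irq() is typically reached: disable_irq() below pairs __disable_irq_nosync() with synchronize_irq(), so a driver can quiesce its device with a sequence like the hypothetical helper here, provided the handler does not need a resource held across the call (that would deadlock):

static void foo_quiesce(unsigned int irq)
{
	/*
	 * Raises the disable depth, then waits until both the hard
	 * handler and any associated threaded handler have finished.
	 */
	disable_irq(irq);

	/* ... device can be reprogrammed safely here ... */

	enable_irq(irq);
}
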
142
143 #ifdef CONFIG_SMP
144 cpumask_var_t irq_default_affinity;
145
146 static bool __irq_can_set_affinity(struct irq_desc *desc)
147 {
148 if (!desc || !irqd_can_balance(&desc->irq_data) ||
149 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
150 return false;
151 return true;
152 }
153
154
155
156
157
158
159 int irq_can_set_affinity(unsigned int irq)
160 {
161 return __irq_can_set_affinity(irq_to_desc(irq));
162 }
163
164
165
166
167
168
169
170
171 bool irq_can_set_affinity_usr(unsigned int irq)
172 {
173 struct irq_desc *desc = irq_to_desc(irq);
174
175 return __irq_can_set_affinity(desc) &&
176 !irqd_affinity_is_managed(&desc->irq_data);
177 }
178
179
180
181
182
183
184
185
186
187
188 void irq_set_thread_affinity(struct irq_desc *desc)
189 {
190 struct irqaction *action;
191
192 for_each_action_of_desc(desc, action)
193 if (action->thread)
194 set_bit(IRQTF_AFFINITY, &action->thread_flags);
195 }
196
197 static void irq_validate_effective_affinity(struct irq_data *data)
198 {
199 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
200 const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
201 struct irq_chip *chip = irq_data_get_irq_chip(data);
202
203 if (!cpumask_empty(m))
204 return;
205 pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
206 chip->name, data->irq);
207 #endif
208 }
209
210 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
211 bool force)
212 {
213 struct irq_desc *desc = irq_data_to_desc(data);
214 struct irq_chip *chip = irq_data_get_irq_chip(data);
215 int ret;
216
217 if (!chip || !chip->irq_set_affinity)
218 return -EINVAL;
219
220 ret = chip->irq_set_affinity(data, mask, force);
221 switch (ret) {
222 case IRQ_SET_MASK_OK:
223 case IRQ_SET_MASK_OK_DONE:
224 cpumask_copy(desc->irq_common_data.affinity, mask);
225
226 case IRQ_SET_MASK_OK_NOCOPY:
227 irq_validate_effective_affinity(data);
228 irq_set_thread_affinity(desc);
229 ret = 0;
230 }
231
232 return ret;
233 }
234
235 #ifdef CONFIG_GENERIC_PENDING_IRQ
236 static inline int irq_set_affinity_pending(struct irq_data *data,
237 const struct cpumask *dest)
238 {
239 struct irq_desc *desc = irq_data_to_desc(data);
240
241 irqd_set_move_pending(data);
242 irq_copy_pending(desc, dest);
243 return 0;
244 }
245 #else
246 static inline int irq_set_affinity_pending(struct irq_data *data,
247 const struct cpumask *dest)
248 {
249 return -EBUSY;
250 }
251 #endif
252
253 static int irq_try_set_affinity(struct irq_data *data,
254 const struct cpumask *dest, bool force)
255 {
256 int ret = irq_do_set_affinity(data, dest, force);
257
258
259
260
261
262
263 if (ret == -EBUSY && !force)
264 ret = irq_set_affinity_pending(data, dest);
265 return ret;
266 }
267
268 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
269 bool force)
270 {
271 struct irq_chip *chip = irq_data_get_irq_chip(data);
272 struct irq_desc *desc = irq_data_to_desc(data);
273 int ret = 0;
274
275 if (!chip || !chip->irq_set_affinity)
276 return -EINVAL;
277
278 if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
279 ret = irq_try_set_affinity(data, mask, force);
280 } else {
281 irqd_set_move_pending(data);
282 irq_copy_pending(desc, mask);
283 }
284
285 if (desc->affinity_notify) {
286 kref_get(&desc->affinity_notify->kref);
287 if (!schedule_work(&desc->affinity_notify->work)) {
288
289 kref_put(&desc->affinity_notify->kref,
290 desc->affinity_notify->release);
291 }
292 }
293 irqd_set(data, IRQD_AFFINITY_SET);
294
295 return ret;
296 }
297
298 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
299 {
300 struct irq_desc *desc = irq_to_desc(irq);
301 unsigned long flags;
302 int ret;
303
304 if (!desc)
305 return -EINVAL;
306
307 raw_spin_lock_irqsave(&desc->lock, flags);
308 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
309 raw_spin_unlock_irqrestore(&desc->lock, flags);
310 return ret;
311 }
312
313 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
314 {
315 unsigned long flags;
316 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
317
318 if (!desc)
319 return -EINVAL;
320 desc->affinity_hint = m;
321 irq_put_desc_unlock(desc, flags);
322
323 if (m)
324 __irq_set_affinity(irq, m, false);
325 return 0;
326 }
327 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
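
The hint set here is exported to user space (irqbalance reads /proc/irq/<n>/affinity_hint) and, as the code above shows, is also applied immediately. A sketch of per-queue spreading, with hypothetical names; the mask pointer is stored, so it must remain valid, and the hint must be cleared with a NULL argument before free_irq() (see the WARN_ON_ONCE in __free_irq()):

#include <linux/cpumask.h>
#include <linux/interrupt.h>

static void foo_spread_queue_irqs(unsigned int *irqs, unsigned int nr_irqs)
{
	unsigned int i, cpu = cpumask_first(cpu_online_mask);

	for (i = 0; i < nr_irqs; i++) {
		/* cpumask_of() returns a static mask, so the pointer stays valid. */
		irq_set_affinity_hint(irqs[i], cpumask_of(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(cpu_online_mask);
	}
}

static void foo_clear_queue_irq_hints(unsigned int *irqs, unsigned int nr_irqs)
{
	unsigned int i;

	for (i = 0; i < nr_irqs; i++)
		irq_set_affinity_hint(irqs[i], NULL);	/* before free_irq() */
}
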
328
329 static void irq_affinity_notify(struct work_struct *work)
330 {
331 struct irq_affinity_notify *notify =
332 container_of(work, struct irq_affinity_notify, work);
333 struct irq_desc *desc = irq_to_desc(notify->irq);
334 cpumask_var_t cpumask;
335 unsigned long flags;
336
337 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
338 goto out;
339
340 raw_spin_lock_irqsave(&desc->lock, flags);
341 if (irq_move_pending(&desc->irq_data))
342 irq_get_pending(cpumask, desc);
343 else
344 cpumask_copy(cpumask, desc->irq_common_data.affinity);
345 raw_spin_unlock_irqrestore(&desc->lock, flags);
346
347 notify->notify(notify, cpumask);
348
349 free_cpumask_var(cpumask);
350 out:
351 kref_put(&notify->kref, notify->release);
352 }
353
354
355
356
357
358
359
360
361
362
363
364
365 int
366 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
367 {
368 struct irq_desc *desc = irq_to_desc(irq);
369 struct irq_affinity_notify *old_notify;
370 unsigned long flags;
371
372
373 might_sleep();
374
375 if (!desc || desc->istate & IRQS_NMI)
376 return -EINVAL;
377
378
379 if (notify) {
380 notify->irq = irq;
381 kref_init(&notify->kref);
382 INIT_WORK(&notify->work, irq_affinity_notify);
383 }
384
385 raw_spin_lock_irqsave(&desc->lock, flags);
386 old_notify = desc->affinity_notify;
387 desc->affinity_notify = notify;
388 raw_spin_unlock_irqrestore(&desc->lock, flags);
389
390 if (old_notify) {
391 if (cancel_work_sync(&old_notify->work)) {
392
393 kref_put(&old_notify->kref, old_notify->release);
394 }
395 kref_put(&old_notify->kref, old_notify->release);
396 }
397
398 return 0;
399 }
400 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
401
402 #ifndef CONFIG_AUTO_IRQ_AFFINITY
403
404
405
406 int irq_setup_affinity(struct irq_desc *desc)
407 {
408 struct cpumask *set = irq_default_affinity;
409 int ret, node = irq_desc_get_node(desc);
410 static DEFINE_RAW_SPINLOCK(mask_lock);
411 static struct cpumask mask;
412
413
414 if (!__irq_can_set_affinity(desc))
415 return 0;
416
417 raw_spin_lock(&mask_lock);
418
419
420
421
422 if (irqd_affinity_is_managed(&desc->irq_data) ||
423 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
424 if (cpumask_intersects(desc->irq_common_data.affinity,
425 cpu_online_mask))
426 set = desc->irq_common_data.affinity;
427 else
428 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
429 }
430
431 cpumask_and(&mask, cpu_online_mask, set);
432 if (cpumask_empty(&mask))
433 cpumask_copy(&mask, cpu_online_mask);
434
435 if (node != NUMA_NO_NODE) {
436 const struct cpumask *nodemask = cpumask_of_node(node);
437
438
439 if (cpumask_intersects(&mask, nodemask))
440 cpumask_and(&mask, &mask, nodemask);
441 }
442 ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
443 raw_spin_unlock(&mask_lock);
444 return ret;
445 }
446 #else
447
448 int irq_setup_affinity(struct irq_desc *desc)
449 {
450 return irq_select_affinity(irq_desc_get_irq(desc));
451 }
452 #endif
453 #endif
454
455
456
457
458
459
460
461
462
463
464
465
466
467 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
468 {
469 unsigned long flags;
470 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
471 struct irq_data *data;
472 struct irq_chip *chip;
473 int ret = -ENOSYS;
474
475 if (!desc)
476 return -EINVAL;
477
478 data = irq_desc_get_irq_data(desc);
479 do {
480 chip = irq_data_get_irq_chip(data);
481 if (chip && chip->irq_set_vcpu_affinity)
482 break;
483 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
484 data = data->parent_data;
485 #else
486 data = NULL;
487 #endif
488 } while (data);
489
490 if (data)
491 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
492 irq_put_desc_unlock(desc, flags);
493
494 return ret;
495 }
496 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
497
498 void __disable_irq(struct irq_desc *desc)
499 {
500 if (!desc->depth++)
501 irq_disable(desc);
502 }
503
504 static int __disable_irq_nosync(unsigned int irq)
505 {
506 unsigned long flags;
507 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
508
509 if (!desc)
510 return -EINVAL;
511 __disable_irq(desc);
512 irq_put_desc_busunlock(desc, flags);
513 return 0;
514 }
515
516
517
518
519
520
521
522
523
524
525
526
527 void disable_irq_nosync(unsigned int irq)
528 {
529 __disable_irq_nosync(irq);
530 }
531 EXPORT_SYMBOL(disable_irq_nosync);
532
533
534
535
536
537
538
539
540
541
542
543
544
545 void disable_irq(unsigned int irq)
546 {
547 if (!__disable_irq_nosync(irq))
548 synchronize_irq(irq);
549 }
550 EXPORT_SYMBOL(disable_irq);
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569 bool disable_hardirq(unsigned int irq)
570 {
571 if (!__disable_irq_nosync(irq))
572 return synchronize_hardirq(irq);
573
574 return false;
575 }
576 EXPORT_SYMBOL_GPL(disable_hardirq);
577
578
579
580
581
582
583
584
585
586
587
588 void disable_nmi_nosync(unsigned int irq)
589 {
590 disable_irq_nosync(irq);
591 }
592
593 void __enable_irq(struct irq_desc *desc)
594 {
595 switch (desc->depth) {
596 case 0:
597 err_out:
598 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
599 irq_desc_get_irq(desc));
600 break;
601 case 1: {
602 if (desc->istate & IRQS_SUSPENDED)
603 goto err_out;
604
605 irq_settings_set_noprobe(desc);
606
607
608
609
610
611
612
613 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
614 break;
615 }
616 default:
617 desc->depth--;
618 }
619 }
620
621
622
623
624
625
626
627
628
629
630
631
632 void enable_irq(unsigned int irq)
633 {
634 unsigned long flags;
635 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
636
637 if (!desc)
638 return;
639 if (WARN(!desc->irq_data.chip,
640 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
641 goto out;
642
643 __enable_irq(desc);
644 out:
645 irq_put_desc_busunlock(desc, flags);
646 }
647 EXPORT_SYMBOL(enable_irq);
648
649
650
651
652
653
654
655
656
657
658 void enable_nmi(unsigned int irq)
659 {
660 enable_irq(irq);
661 }
662
663 static int set_irq_wake_real(unsigned int irq, unsigned int on)
664 {
665 struct irq_desc *desc = irq_to_desc(irq);
666 int ret = -ENXIO;
667
668 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
669 return 0;
670
671 if (desc->irq_data.chip->irq_set_wake)
672 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
673
674 return ret;
675 }
676
677
678
679
680
681
682
683
684
685
686
687
688
689 int irq_set_irq_wake(unsigned int irq, unsigned int on)
690 {
691 unsigned long flags;
692 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
693 int ret = 0;
694
695 if (!desc)
696 return -EINVAL;
697
698
699 if (desc->istate & IRQS_NMI) {
700 ret = -EINVAL;
701 goto out_unlock;
702 }
703
704
705
706
707 if (on) {
708 if (desc->wake_depth++ == 0) {
709 ret = set_irq_wake_real(irq, on);
710 if (ret)
711 desc->wake_depth = 0;
712 else
713 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
714 }
715 } else {
716 if (desc->wake_depth == 0) {
717 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
718 } else if (--desc->wake_depth == 0) {
719 ret = set_irq_wake_real(irq, on);
720 if (ret)
721 desc->wake_depth = 1;
722 else
723 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
724 }
725 }
726
727 out_unlock:
728 irq_put_desc_busunlock(desc, flags);
729 return ret;
730 }
731 EXPORT_SYMBOL(irq_set_irq_wake);
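
A common caller pattern is a wakeup-capable device arming its interrupt across system suspend. The enable_irq_wake()/disable_irq_wake() helpers in <linux/interrupt.h> are thin wrappers around irq_set_irq_wake(); the sketch below assumes a hypothetical foo_dev that stores its Linux interrupt number:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct foo_dev {
	int irq;
};

static int foo_suspend(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(foo->irq);	/* irq_set_irq_wake(irq, 1) */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(foo->irq);	/* irq_set_irq_wake(irq, 0) */
	return 0;
}
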
732
733
734
735
736
737
738 int can_request_irq(unsigned int irq, unsigned long irqflags)
739 {
740 unsigned long flags;
741 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
742 int canrequest = 0;
743
744 if (!desc)
745 return 0;
746
747 if (irq_settings_can_request(desc)) {
748 if (!desc->action ||
749 irqflags & desc->action->flags & IRQF_SHARED)
750 canrequest = 1;
751 }
752 irq_put_desc_unlock(desc, flags);
753 return canrequest;
754 }
755
756 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
757 {
758 struct irq_chip *chip = desc->irq_data.chip;
759 int ret, unmask = 0;
760
761 if (!chip || !chip->irq_set_type) {
762
763
764
765
766 pr_debug("No set_type function for IRQ %d (%s)\n",
767 irq_desc_get_irq(desc),
768 chip ? (chip->name ? : "unknown") : "unknown");
769 return 0;
770 }
771
772 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
773 if (!irqd_irq_masked(&desc->irq_data))
774 mask_irq(desc);
775 if (!irqd_irq_disabled(&desc->irq_data))
776 unmask = 1;
777 }
778
779
780 flags &= IRQ_TYPE_SENSE_MASK;
781 ret = chip->irq_set_type(&desc->irq_data, flags);
782
783 switch (ret) {
784 case IRQ_SET_MASK_OK:
785 case IRQ_SET_MASK_OK_DONE:
786 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
787 irqd_set(&desc->irq_data, flags);
788
789
790 case IRQ_SET_MASK_OK_NOCOPY:
791 flags = irqd_get_trigger_type(&desc->irq_data);
792 irq_settings_set_trigger_mask(desc, flags);
793 irqd_clear(&desc->irq_data, IRQD_LEVEL);
794 irq_settings_clr_level(desc);
795 if (flags & IRQ_TYPE_LEVEL_MASK) {
796 irq_settings_set_level(desc);
797 irqd_set(&desc->irq_data, IRQD_LEVEL);
798 }
799
800 ret = 0;
801 break;
802 default:
803 pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
804 flags, irq_desc_get_irq(desc), chip->irq_set_type);
805 }
806 if (unmask)
807 unmask_irq(desc);
808 return ret;
809 }
810
811 #ifdef CONFIG_HARDIRQS_SW_RESEND
812 int irq_set_parent(int irq, int parent_irq)
813 {
814 unsigned long flags;
815 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
816
817 if (!desc)
818 return -EINVAL;
819
820 desc->parent_irq = parent_irq;
821
822 irq_put_desc_unlock(desc, flags);
823 return 0;
824 }
825 EXPORT_SYMBOL_GPL(irq_set_parent);
826 #endif
827
828
829
830
831
832
833 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
834 {
835 return IRQ_WAKE_THREAD;
836 }
837
838
839
840
841
842 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
843 {
844 WARN(1, "Primary handler called for nested irq %d\n", irq);
845 return IRQ_NONE;
846 }
847
848 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
849 {
850 WARN(1, "Secondary action handler called for irq %d\n", irq);
851 return IRQ_NONE;
852 }
853
854 static int irq_wait_for_interrupt(struct irqaction *action)
855 {
856 for (;;) {
857 set_current_state(TASK_INTERRUPTIBLE);
858
859 if (kthread_should_stop()) {
860
861 if (test_and_clear_bit(IRQTF_RUNTHREAD,
862 &action->thread_flags)) {
863 __set_current_state(TASK_RUNNING);
864 return 0;
865 }
866 __set_current_state(TASK_RUNNING);
867 return -1;
868 }
869
870 if (test_and_clear_bit(IRQTF_RUNTHREAD,
871 &action->thread_flags)) {
872 __set_current_state(TASK_RUNNING);
873 return 0;
874 }
875 schedule();
876 }
877 }
878
879
880
881
882
883
884 static void irq_finalize_oneshot(struct irq_desc *desc,
885 struct irqaction *action)
886 {
887 if (!(desc->istate & IRQS_ONESHOT) ||
888 action->handler == irq_forced_secondary_handler)
889 return;
890 again:
891 chip_bus_lock(desc);
892 raw_spin_lock_irq(&desc->lock);
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
909 raw_spin_unlock_irq(&desc->lock);
910 chip_bus_sync_unlock(desc);
911 cpu_relax();
912 goto again;
913 }
914
915
916
917
918
919
920 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
921 goto out_unlock;
922
923 desc->threads_oneshot &= ~action->thread_mask;
924
925 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
926 irqd_irq_masked(&desc->irq_data))
927 unmask_threaded_irq(desc);
928
929 out_unlock:
930 raw_spin_unlock_irq(&desc->lock);
931 chip_bus_sync_unlock(desc);
932 }
933
934 #ifdef CONFIG_SMP
935
936
937
938 static void
939 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
940 {
941 cpumask_var_t mask;
942 bool valid = true;
943
944 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
945 return;
946
947
948
949
950
951 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
952 set_bit(IRQTF_AFFINITY, &action->thread_flags);
953 return;
954 }
955
956 raw_spin_lock_irq(&desc->lock);
957
958
959
960
961 if (cpumask_available(desc->irq_common_data.affinity)) {
962 const struct cpumask *m;
963
964 m = irq_data_get_effective_affinity_mask(&desc->irq_data);
965 cpumask_copy(mask, m);
966 } else {
967 valid = false;
968 }
969 raw_spin_unlock_irq(&desc->lock);
970
971 if (valid)
972 set_cpus_allowed_ptr(current, mask);
973 free_cpumask_var(mask);
974 }
975 #else
976 static inline void
977 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
978 #endif
979
980
981
982
983
984
985
986 static irqreturn_t
987 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
988 {
989 irqreturn_t ret;
990
991 local_bh_disable();
992 ret = action->thread_fn(action->irq, action->dev_id);
993 if (ret == IRQ_HANDLED)
994 atomic_inc(&desc->threads_handled);
995
996 irq_finalize_oneshot(desc, action);
997 local_bh_enable();
998 return ret;
999 }
1000
1001
1002
1003
1004
1005
1006 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1007 struct irqaction *action)
1008 {
1009 irqreturn_t ret;
1010
1011 ret = action->thread_fn(action->irq, action->dev_id);
1012 if (ret == IRQ_HANDLED)
1013 atomic_inc(&desc->threads_handled);
1014
1015 irq_finalize_oneshot(desc, action);
1016 return ret;
1017 }
1018
1019 static void wake_threads_waitq(struct irq_desc *desc)
1020 {
1021 if (atomic_dec_and_test(&desc->threads_active))
1022 wake_up(&desc->wait_for_threads);
1023 }
1024
1025 static void irq_thread_dtor(struct callback_head *unused)
1026 {
1027 struct task_struct *tsk = current;
1028 struct irq_desc *desc;
1029 struct irqaction *action;
1030
1031 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
1032 return;
1033
1034 action = kthread_data(tsk);
1035
1036 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
1037 tsk->comm, tsk->pid, action->irq);
1038
1039
1040 desc = irq_to_desc(action->irq);
1041
1042
1043
1044
1045 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1046 wake_threads_waitq(desc);
1047
1048
1049 irq_finalize_oneshot(desc, action);
1050 }
1051
1052 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1053 {
1054 struct irqaction *secondary = action->secondary;
1055
1056 if (WARN_ON_ONCE(!secondary))
1057 return;
1058
1059 raw_spin_lock_irq(&desc->lock);
1060 __irq_wake_thread(desc, secondary);
1061 raw_spin_unlock_irq(&desc->lock);
1062 }
1063
1064
1065
1066
1067 static int irq_thread(void *data)
1068 {
1069 struct callback_head on_exit_work;
1070 struct irqaction *action = data;
1071 struct irq_desc *desc = irq_to_desc(action->irq);
1072 irqreturn_t (*handler_fn)(struct irq_desc *desc,
1073 struct irqaction *action);
1074
1075 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
1076 &action->thread_flags))
1077 handler_fn = irq_forced_thread_fn;
1078 else
1079 handler_fn = irq_thread_fn;
1080
1081 init_task_work(&on_exit_work, irq_thread_dtor);
1082 task_work_add(current, &on_exit_work, false);
1083
1084 irq_thread_check_affinity(desc, action);
1085
1086 while (!irq_wait_for_interrupt(action)) {
1087 irqreturn_t action_ret;
1088
1089 irq_thread_check_affinity(desc, action);
1090
1091 action_ret = handler_fn(desc, action);
1092 if (action_ret == IRQ_WAKE_THREAD)
1093 irq_wake_secondary(desc, action);
1094
1095 wake_threads_waitq(desc);
1096 }
1097
1098
1099
1100
1101
1102
1103
1104 task_work_cancel(current, irq_thread_dtor);
1105 return 0;
1106 }
1107
1108
1109
1110
1111
1112
1113
1114 void irq_wake_thread(unsigned int irq, void *dev_id)
1115 {
1116 struct irq_desc *desc = irq_to_desc(irq);
1117 struct irqaction *action;
1118 unsigned long flags;
1119
1120 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1121 return;
1122
1123 raw_spin_lock_irqsave(&desc->lock, flags);
1124 for_each_action_of_desc(desc, action) {
1125 if (action->dev_id == dev_id) {
1126 if (action->thread)
1127 __irq_wake_thread(desc, action);
1128 break;
1129 }
1130 }
1131 raw_spin_unlock_irqrestore(&desc->lock, flags);
1132 }
1133 EXPORT_SYMBOL_GPL(irq_wake_thread);
1134
1135 static int irq_setup_forced_threading(struct irqaction *new)
1136 {
1137 if (!force_irqthreads)
1138 return 0;
1139 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1140 return 0;
1141
1142
1143
1144
1145
1146 if (new->handler == irq_default_primary_handler)
1147 return 0;
1148
1149 new->flags |= IRQF_ONESHOT;
1150
1151
1152
1153
1154
1155
1156 if (new->handler && new->thread_fn) {
1157
1158 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1159 if (!new->secondary)
1160 return -ENOMEM;
1161 new->secondary->handler = irq_forced_secondary_handler;
1162 new->secondary->thread_fn = new->thread_fn;
1163 new->secondary->dev_id = new->dev_id;
1164 new->secondary->irq = new->irq;
1165 new->secondary->name = new->name;
1166 }
1167
1168 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1169 new->thread_fn = new->handler;
1170 new->handler = irq_default_primary_handler;
1171 return 0;
1172 }
1173
1174 static int irq_request_resources(struct irq_desc *desc)
1175 {
1176 struct irq_data *d = &desc->irq_data;
1177 struct irq_chip *c = d->chip;
1178
1179 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1180 }
1181
1182 static void irq_release_resources(struct irq_desc *desc)
1183 {
1184 struct irq_data *d = &desc->irq_data;
1185 struct irq_chip *c = d->chip;
1186
1187 if (c->irq_release_resources)
1188 c->irq_release_resources(d);
1189 }
1190
1191 static bool irq_supports_nmi(struct irq_desc *desc)
1192 {
1193 struct irq_data *d = irq_desc_get_irq_data(desc);
1194
1195 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1196
1197 if (d->parent_data)
1198 return false;
1199 #endif
1200
1201 if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1202 return false;
1203
1204 return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1205 }
1206
1207 static int irq_nmi_setup(struct irq_desc *desc)
1208 {
1209 struct irq_data *d = irq_desc_get_irq_data(desc);
1210 struct irq_chip *c = d->chip;
1211
1212 return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1213 }
1214
1215 static void irq_nmi_teardown(struct irq_desc *desc)
1216 {
1217 struct irq_data *d = irq_desc_get_irq_data(desc);
1218 struct irq_chip *c = d->chip;
1219
1220 if (c->irq_nmi_teardown)
1221 c->irq_nmi_teardown(d);
1222 }
1223
1224 static int
1225 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1226 {
1227 struct task_struct *t;
1228 struct sched_param param = {
1229 .sched_priority = MAX_USER_RT_PRIO/2,
1230 };
1231
1232 if (!secondary) {
1233 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1234 new->name);
1235 } else {
1236 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1237 new->name);
1238 param.sched_priority -= 1;
1239 }
1240
1241 if (IS_ERR(t))
1242 return PTR_ERR(t);
1243
1244 sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1245
1246
1247
1248
1249
1250
1251 new->thread = get_task_struct(t);
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1262 return 0;
1263 }
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279 static int
1280 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1281 {
1282 struct irqaction *old, **old_ptr;
1283 unsigned long flags, thread_mask = 0;
1284 int ret, nested, shared = 0;
1285
1286 if (!desc)
1287 return -EINVAL;
1288
1289 if (desc->irq_data.chip == &no_irq_chip)
1290 return -ENOSYS;
1291 if (!try_module_get(desc->owner))
1292 return -ENODEV;
1293
1294 new->irq = irq;
1295
1296
1297
1298
1299
1300 if (!(new->flags & IRQF_TRIGGER_MASK))
1301 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1302
1303
1304
1305
1306
1307 nested = irq_settings_is_nested_thread(desc);
1308 if (nested) {
1309 if (!new->thread_fn) {
1310 ret = -EINVAL;
1311 goto out_mput;
1312 }
1313
1314
1315
1316
1317
1318 new->handler = irq_nested_primary_handler;
1319 } else {
1320 if (irq_settings_can_thread(desc)) {
1321 ret = irq_setup_forced_threading(new);
1322 if (ret)
1323 goto out_mput;
1324 }
1325 }
1326
1327
1328
1329
1330
1331
1332 if (new->thread_fn && !nested) {
1333 ret = setup_irq_thread(new, irq, false);
1334 if (ret)
1335 goto out_mput;
1336 if (new->secondary) {
1337 ret = setup_irq_thread(new->secondary, irq, true);
1338 if (ret)
1339 goto out_thread;
1340 }
1341 }
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1353 new->flags &= ~IRQF_ONESHOT;
1354
1355
1356
1357
1358
1359
1360
1361
1362 mutex_lock(&desc->request_mutex);
1363
1364
1365
1366
1367
1368
1369 chip_bus_lock(desc);
1370
1371
1372 if (!desc->action) {
1373 ret = irq_request_resources(desc);
1374 if (ret) {
1375 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1376 new->name, irq, desc->irq_data.chip->name);
1377 goto out_bus_unlock;
1378 }
1379 }
1380
1381
1382
1383
1384
1385
1386
1387 raw_spin_lock_irqsave(&desc->lock, flags);
1388 old_ptr = &desc->action;
1389 old = *old_ptr;
1390 if (old) {
1391
1392
1393
1394
1395
1396
1397
1398
1399 unsigned int oldtype;
1400
1401 if (desc->istate & IRQS_NMI) {
1402 pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1403 new->name, irq, desc->irq_data.chip->name);
1404 ret = -EINVAL;
1405 goto out_unlock;
1406 }
1407
1408
1409
1410
1411
1412 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1413 oldtype = irqd_get_trigger_type(&desc->irq_data);
1414 } else {
1415 oldtype = new->flags & IRQF_TRIGGER_MASK;
1416 irqd_set_trigger_type(&desc->irq_data, oldtype);
1417 }
1418
1419 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1420 (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1421 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1422 goto mismatch;
1423
1424
1425 if ((old->flags & IRQF_PERCPU) !=
1426 (new->flags & IRQF_PERCPU))
1427 goto mismatch;
1428
1429
1430 do {
1431
1432
1433
1434
1435
1436 thread_mask |= old->thread_mask;
1437 old_ptr = &old->next;
1438 old = *old_ptr;
1439 } while (old);
1440 shared = 1;
1441 }
1442
1443
1444
1445
1446
1447
1448 if (new->flags & IRQF_ONESHOT) {
1449
1450
1451
1452
1453 if (thread_mask == ~0UL) {
1454 ret = -EBUSY;
1455 goto out_unlock;
1456 }
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477 new->thread_mask = 1UL << ffz(thread_mask);
1478
1479 } else if (new->handler == irq_default_primary_handler &&
1480 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1497 irq);
1498 ret = -EINVAL;
1499 goto out_unlock;
1500 }
1501
1502 if (!shared) {
1503 init_waitqueue_head(&desc->wait_for_threads);
1504
1505
1506 if (new->flags & IRQF_TRIGGER_MASK) {
1507 ret = __irq_set_trigger(desc,
1508 new->flags & IRQF_TRIGGER_MASK);
1509
1510 if (ret)
1511 goto out_unlock;
1512 }
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525 ret = irq_activate(desc);
1526 if (ret)
1527 goto out_unlock;
1528
1529 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1530 IRQS_ONESHOT | IRQS_WAITING);
1531 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1532
1533 if (new->flags & IRQF_PERCPU) {
1534 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1535 irq_settings_set_per_cpu(desc);
1536 }
1537
1538 if (new->flags & IRQF_ONESHOT)
1539 desc->istate |= IRQS_ONESHOT;
1540
1541
1542 if (new->flags & IRQF_NOBALANCING) {
1543 irq_settings_set_no_balancing(desc);
1544 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1545 }
1546
1547 if (irq_settings_can_autoenable(desc)) {
1548 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1549 } else {
1550
1551
1552
1553
1554
1555
1556 WARN_ON_ONCE(new->flags & IRQF_SHARED);
1557
1558 desc->depth = 1;
1559 }
1560
1561 } else if (new->flags & IRQF_TRIGGER_MASK) {
1562 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1563 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1564
1565 if (nmsk != omsk)
1566
1567 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1568 irq, omsk, nmsk);
1569 }
1570
1571 *old_ptr = new;
1572
1573 irq_pm_install_action(desc, new);
1574
1575
1576 desc->irq_count = 0;
1577 desc->irqs_unhandled = 0;
1578
1579
1580
1581
1582
1583 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1584 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1585 __enable_irq(desc);
1586 }
1587
1588 raw_spin_unlock_irqrestore(&desc->lock, flags);
1589 chip_bus_sync_unlock(desc);
1590 mutex_unlock(&desc->request_mutex);
1591
1592 irq_setup_timings(desc, new);
1593
1594
1595
1596
1597
1598 if (new->thread)
1599 wake_up_process(new->thread);
1600 if (new->secondary)
1601 wake_up_process(new->secondary->thread);
1602
1603 register_irq_proc(irq, desc);
1604 new->dir = NULL;
1605 register_handler_proc(irq, new);
1606 return 0;
1607
1608 mismatch:
1609 if (!(new->flags & IRQF_PROBE_SHARED)) {
1610 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1611 irq, new->flags, new->name, old->flags, old->name);
1612 #ifdef CONFIG_DEBUG_SHIRQ
1613 dump_stack();
1614 #endif
1615 }
1616 ret = -EBUSY;
1617
1618 out_unlock:
1619 raw_spin_unlock_irqrestore(&desc->lock, flags);
1620
1621 if (!desc->action)
1622 irq_release_resources(desc);
1623 out_bus_unlock:
1624 chip_bus_sync_unlock(desc);
1625 mutex_unlock(&desc->request_mutex);
1626
1627 out_thread:
1628 if (new->thread) {
1629 struct task_struct *t = new->thread;
1630
1631 new->thread = NULL;
1632 kthread_stop(t);
1633 put_task_struct(t);
1634 }
1635 if (new->secondary && new->secondary->thread) {
1636 struct task_struct *t = new->secondary->thread;
1637
1638 new->secondary->thread = NULL;
1639 kthread_stop(t);
1640 put_task_struct(t);
1641 }
1642 out_mput:
1643 module_put(desc->owner);
1644 return ret;
1645 }
1646
1647
1648
1649
1650
1651
1652
1653
1654 int setup_irq(unsigned int irq, struct irqaction *act)
1655 {
1656 int retval;
1657 struct irq_desc *desc = irq_to_desc(irq);
1658
1659 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1660 return -EINVAL;
1661
1662 retval = irq_chip_pm_get(&desc->irq_data);
1663 if (retval < 0)
1664 return retval;
1665
1666 retval = __setup_irq(irq, desc, act);
1667
1668 if (retval)
1669 irq_chip_pm_put(&desc->irq_data);
1670
1671 return retval;
1672 }
1673 EXPORT_SYMBOL_GPL(setup_irq);
1674
1675
1676
1677
1678
1679 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1680 {
1681 unsigned irq = desc->irq_data.irq;
1682 struct irqaction *action, **action_ptr;
1683 unsigned long flags;
1684
1685 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1686
1687 mutex_lock(&desc->request_mutex);
1688 chip_bus_lock(desc);
1689 raw_spin_lock_irqsave(&desc->lock, flags);
1690
1691
1692
1693
1694
1695 action_ptr = &desc->action;
1696 for (;;) {
1697 action = *action_ptr;
1698
1699 if (!action) {
1700 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1701 raw_spin_unlock_irqrestore(&desc->lock, flags);
1702 chip_bus_sync_unlock(desc);
1703 mutex_unlock(&desc->request_mutex);
1704 return NULL;
1705 }
1706
1707 if (action->dev_id == dev_id)
1708 break;
1709 action_ptr = &action->next;
1710 }
1711
1712
1713 *action_ptr = action->next;
1714
1715 irq_pm_remove_action(desc, action);
1716
1717
1718 if (!desc->action) {
1719 irq_settings_clr_disable_unlazy(desc);
1720
1721 irq_shutdown(desc);
1722 }
1723
1724 #ifdef CONFIG_SMP
1725
1726 if (WARN_ON_ONCE(desc->affinity_hint))
1727 desc->affinity_hint = NULL;
1728 #endif
1729
1730 raw_spin_unlock_irqrestore(&desc->lock, flags);
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745 chip_bus_sync_unlock(desc);
1746
1747 unregister_handler_proc(irq, action);
1748
1749
1750
1751
1752
1753
1754 __synchronize_hardirq(desc, true);
1755
1756 #ifdef CONFIG_DEBUG_SHIRQ
1757
1758
1759
1760
1761
1762
1763
1764
1765 if (action->flags & IRQF_SHARED) {
1766 local_irq_save(flags);
1767 action->handler(irq, dev_id);
1768 local_irq_restore(flags);
1769 }
1770 #endif
1771
1772
1773
1774
1775
1776
1777
1778 if (action->thread) {
1779 kthread_stop(action->thread);
1780 put_task_struct(action->thread);
1781 if (action->secondary && action->secondary->thread) {
1782 kthread_stop(action->secondary->thread);
1783 put_task_struct(action->secondary->thread);
1784 }
1785 }
1786
1787
1788 if (!desc->action) {
1789
1790
1791
1792
1793 chip_bus_lock(desc);
1794
1795
1796
1797
1798 raw_spin_lock_irqsave(&desc->lock, flags);
1799 irq_domain_deactivate_irq(&desc->irq_data);
1800 raw_spin_unlock_irqrestore(&desc->lock, flags);
1801
1802 irq_release_resources(desc);
1803 chip_bus_sync_unlock(desc);
1804 irq_remove_timings(desc);
1805 }
1806
1807 mutex_unlock(&desc->request_mutex);
1808
1809 irq_chip_pm_put(&desc->irq_data);
1810 module_put(desc->owner);
1811 kfree(action->secondary);
1812 return action;
1813 }
1814
1815
1816
1817
1818
1819
1820
1821
1822 void remove_irq(unsigned int irq, struct irqaction *act)
1823 {
1824 struct irq_desc *desc = irq_to_desc(irq);
1825
1826 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1827 __free_irq(desc, act->dev_id);
1828 }
1829 EXPORT_SYMBOL_GPL(remove_irq);
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847 const void *free_irq(unsigned int irq, void *dev_id)
1848 {
1849 struct irq_desc *desc = irq_to_desc(irq);
1850 struct irqaction *action;
1851 const char *devname;
1852
1853 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1854 return NULL;
1855
1856 #ifdef CONFIG_SMP
1857 if (WARN_ON(desc->affinity_notify))
1858 desc->affinity_notify = NULL;
1859 #endif
1860
1861 action = __free_irq(desc, dev_id);
1862
1863 if (!action)
1864 return NULL;
1865
1866 devname = action->name;
1867 kfree(action);
1868 return devname;
1869 }
1870 EXPORT_SYMBOL(free_irq);
1871
1872
1873 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
1874 {
1875 const char *devname = NULL;
1876
1877 desc->istate &= ~IRQS_NMI;
1878
1879 if (!WARN_ON(desc->action == NULL)) {
1880 irq_pm_remove_action(desc, desc->action);
1881 devname = desc->action->name;
1882 unregister_handler_proc(irq, desc->action);
1883
1884 kfree(desc->action);
1885 desc->action = NULL;
1886 }
1887
1888 irq_settings_clr_disable_unlazy(desc);
1889 irq_shutdown_and_deactivate(desc);
1890
1891 irq_release_resources(desc);
1892
1893 irq_chip_pm_put(&desc->irq_data);
1894 module_put(desc->owner);
1895
1896 return devname;
1897 }
1898
1899 const void *free_nmi(unsigned int irq, void *dev_id)
1900 {
1901 struct irq_desc *desc = irq_to_desc(irq);
1902 unsigned long flags;
1903 const void *devname;
1904
1905 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
1906 return NULL;
1907
1908 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1909 return NULL;
1910
1911
1912 if (WARN_ON(desc->depth == 0))
1913 disable_nmi_nosync(irq);
1914
1915 raw_spin_lock_irqsave(&desc->lock, flags);
1916
1917 irq_nmi_teardown(desc);
1918 devname = __cleanup_nmi(irq, desc);
1919
1920 raw_spin_unlock_irqrestore(&desc->lock, flags);
1921
1922 return devname;
1923 }
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1968 irq_handler_t thread_fn, unsigned long irqflags,
1969 const char *devname, void *dev_id)
1970 {
1971 struct irqaction *action;
1972 struct irq_desc *desc;
1973 int retval;
1974
1975 if (irq == IRQ_NOTCONNECTED)
1976 return -ENOTCONN;
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987 if (((irqflags & IRQF_SHARED) && !dev_id) ||
1988 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1989 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1990 return -EINVAL;
1991
1992 desc = irq_to_desc(irq);
1993 if (!desc)
1994 return -EINVAL;
1995
1996 if (!irq_settings_can_request(desc) ||
1997 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1998 return -EINVAL;
1999
2000 if (!handler) {
2001 if (!thread_fn)
2002 return -EINVAL;
2003 handler = irq_default_primary_handler;
2004 }
2005
2006 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2007 if (!action)
2008 return -ENOMEM;
2009
2010 action->handler = handler;
2011 action->thread_fn = thread_fn;
2012 action->flags = irqflags;
2013 action->name = devname;
2014 action->dev_id = dev_id;
2015
2016 retval = irq_chip_pm_get(&desc->irq_data);
2017 if (retval < 0) {
2018 kfree(action);
2019 return retval;
2020 }
2021
2022 retval = __setup_irq(irq, desc, action);
2023
2024 if (retval) {
2025 irq_chip_pm_put(&desc->irq_data);
2026 kfree(action->secondary);
2027 kfree(action);
2028 }
2029
2030 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
2031 if (!retval && (irqflags & IRQF_SHARED)) {
2032
2033
2034
2035
2036
2037
2038 unsigned long flags;
2039
2040 disable_irq(irq);
2041 local_irq_save(flags);
2042
2043 handler(irq, dev_id);
2044
2045 local_irq_restore(flags);
2046 enable_irq(irq);
2047 }
2048 #endif
2049 return retval;
2050 }
2051 EXPORT_SYMBOL(request_threaded_irq);
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2071 unsigned long flags, const char *name, void *dev_id)
2072 {
2073 struct irq_desc *desc;
2074 int ret;
2075
2076 if (irq == IRQ_NOTCONNECTED)
2077 return -ENOTCONN;
2078
2079 desc = irq_to_desc(irq);
2080 if (!desc)
2081 return -EINVAL;
2082
2083 if (irq_settings_is_nested_thread(desc)) {
2084 ret = request_threaded_irq(irq, NULL, handler,
2085 flags, name, dev_id);
2086 return !ret ? IRQC_IS_NESTED : ret;
2087 }
2088
2089 ret = request_irq(irq, handler, flags, name, dev_id);
2090 return !ret ? IRQC_IS_HARDIRQ : ret;
2091 }
2092 EXPORT_SYMBOL_GPL(request_any_context_irq);
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120 int request_nmi(unsigned int irq, irq_handler_t handler,
2121 unsigned long irqflags, const char *name, void *dev_id)
2122 {
2123 struct irqaction *action;
2124 struct irq_desc *desc;
2125 unsigned long flags;
2126 int retval;
2127
2128 if (irq == IRQ_NOTCONNECTED)
2129 return -ENOTCONN;
2130
2131
2132 if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2133 return -EINVAL;
2134
2135 if (!(irqflags & IRQF_PERCPU))
2136 return -EINVAL;
2137
2138 if (!handler)
2139 return -EINVAL;
2140
2141 desc = irq_to_desc(irq);
2142
2143 if (!desc || irq_settings_can_autoenable(desc) ||
2144 !irq_settings_can_request(desc) ||
2145 WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2146 !irq_supports_nmi(desc))
2147 return -EINVAL;
2148
2149 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2150 if (!action)
2151 return -ENOMEM;
2152
2153 action->handler = handler;
2154 action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2155 action->name = name;
2156 action->dev_id = dev_id;
2157
2158 retval = irq_chip_pm_get(&desc->irq_data);
2159 if (retval < 0)
2160 goto err_out;
2161
2162 retval = __setup_irq(irq, desc, action);
2163 if (retval)
2164 goto err_irq_setup;
2165
2166 raw_spin_lock_irqsave(&desc->lock, flags);
2167
2168
2169 desc->istate |= IRQS_NMI;
2170 retval = irq_nmi_setup(desc);
2171 if (retval) {
2172 __cleanup_nmi(irq, desc);
2173 raw_spin_unlock_irqrestore(&desc->lock, flags);
2174 return -EINVAL;
2175 }
2176
2177 raw_spin_unlock_irqrestore(&desc->lock, flags);
2178
2179 return 0;
2180
2181 err_irq_setup:
2182 irq_chip_pm_put(&desc->irq_data);
2183 err_out:
2184 kfree(action);
2185
2186 return retval;
2187 }
2188
2189 void enable_percpu_irq(unsigned int irq, unsigned int type)
2190 {
2191 unsigned int cpu = smp_processor_id();
2192 unsigned long flags;
2193 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2194
2195 if (!desc)
2196 return;
2197
2198
2199
2200
2201
2202 type &= IRQ_TYPE_SENSE_MASK;
2203 if (type == IRQ_TYPE_NONE)
2204 type = irqd_get_trigger_type(&desc->irq_data);
2205
2206 if (type != IRQ_TYPE_NONE) {
2207 int ret;
2208
2209 ret = __irq_set_trigger(desc, type);
2210
2211 if (ret) {
2212 WARN(1, "failed to set type for IRQ%d\n", irq);
2213 goto out;
2214 }
2215 }
2216
2217 irq_percpu_enable(desc, cpu);
2218 out:
2219 irq_put_desc_unlock(desc, flags);
2220 }
2221 EXPORT_SYMBOL_GPL(enable_percpu_irq);
2222
2223 void enable_percpu_nmi(unsigned int irq, unsigned int type)
2224 {
2225 enable_percpu_irq(irq, type);
2226 }
2227
2228
2229
2230
2231
2232
2233
2234
2235 bool irq_percpu_is_enabled(unsigned int irq)
2236 {
2237 unsigned int cpu = smp_processor_id();
2238 struct irq_desc *desc;
2239 unsigned long flags;
2240 bool is_enabled;
2241
2242 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2243 if (!desc)
2244 return false;
2245
2246 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2247 irq_put_desc_unlock(desc, flags);
2248
2249 return is_enabled;
2250 }
2251 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
2252
2253 void disable_percpu_irq(unsigned int irq)
2254 {
2255 unsigned int cpu = smp_processor_id();
2256 unsigned long flags;
2257 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2258
2259 if (!desc)
2260 return;
2261
2262 irq_percpu_disable(desc, cpu);
2263 irq_put_desc_unlock(desc, flags);
2264 }
2265 EXPORT_SYMBOL_GPL(disable_percpu_irq);
2266
2267 void disable_percpu_nmi(unsigned int irq)
2268 {
2269 disable_percpu_irq(irq);
2270 }
2271
2272
2273
2274
2275 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2276 {
2277 struct irq_desc *desc = irq_to_desc(irq);
2278 struct irqaction *action;
2279 unsigned long flags;
2280
2281 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2282
2283 if (!desc)
2284 return NULL;
2285
2286 raw_spin_lock_irqsave(&desc->lock, flags);
2287
2288 action = desc->action;
2289 if (!action || action->percpu_dev_id != dev_id) {
2290 WARN(1, "Trying to free already-free IRQ %d\n", irq);
2291 goto bad;
2292 }
2293
2294 if (!cpumask_empty(desc->percpu_enabled)) {
2295 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2296 irq, cpumask_first(desc->percpu_enabled));
2297 goto bad;
2298 }
2299
2300
2301 desc->action = NULL;
2302
2303 desc->istate &= ~IRQS_NMI;
2304
2305 raw_spin_unlock_irqrestore(&desc->lock, flags);
2306
2307 unregister_handler_proc(irq, action);
2308
2309 irq_chip_pm_put(&desc->irq_data);
2310 module_put(desc->owner);
2311 return action;
2312
2313 bad:
2314 raw_spin_unlock_irqrestore(&desc->lock, flags);
2315 return NULL;
2316 }
2317
2318
2319
2320
2321
2322
2323
2324
2325 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2326 {
2327 struct irq_desc *desc = irq_to_desc(irq);
2328
2329 if (desc && irq_settings_is_per_cpu_devid(desc))
2330 __free_percpu_irq(irq, act->percpu_dev_id);
2331 }
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2346 {
2347 struct irq_desc *desc = irq_to_desc(irq);
2348
2349 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2350 return;
2351
2352 chip_bus_lock(desc);
2353 kfree(__free_percpu_irq(irq, dev_id));
2354 chip_bus_sync_unlock(desc);
2355 }
2356 EXPORT_SYMBOL_GPL(free_percpu_irq);
2357
2358 void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2359 {
2360 struct irq_desc *desc = irq_to_desc(irq);
2361
2362 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2363 return;
2364
2365 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2366 return;
2367
2368 kfree(__free_percpu_irq(irq, dev_id));
2369 }
2370
2371
2372
2373
2374
2375
2376
2377
2378 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2379 {
2380 struct irq_desc *desc = irq_to_desc(irq);
2381 int retval;
2382
2383 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2384 return -EINVAL;
2385
2386 retval = irq_chip_pm_get(&desc->irq_data);
2387 if (retval < 0)
2388 return retval;
2389
2390 retval = __setup_irq(irq, desc, act);
2391
2392 if (retval)
2393 irq_chip_pm_put(&desc->irq_data);
2394
2395 return retval;
2396 }
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415 int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2416 unsigned long flags, const char *devname,
2417 void __percpu *dev_id)
2418 {
2419 struct irqaction *action;
2420 struct irq_desc *desc;
2421 int retval;
2422
2423 if (!dev_id)
2424 return -EINVAL;
2425
2426 desc = irq_to_desc(irq);
2427 if (!desc || !irq_settings_can_request(desc) ||
2428 !irq_settings_is_per_cpu_devid(desc))
2429 return -EINVAL;
2430
2431 if (flags && flags != IRQF_TIMER)
2432 return -EINVAL;
2433
2434 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2435 if (!action)
2436 return -ENOMEM;
2437
2438 action->handler = handler;
2439 action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2440 action->name = devname;
2441 action->percpu_dev_id = dev_id;
2442
2443 retval = irq_chip_pm_get(&desc->irq_data);
2444 if (retval < 0) {
2445 kfree(action);
2446 return retval;
2447 }
2448
2449 retval = __setup_irq(irq, desc, action);
2450
2451 if (retval) {
2452 irq_chip_pm_put(&desc->irq_data);
2453 kfree(action);
2454 }
2455
2456 return retval;
2457 }
2458 EXPORT_SYMBOL_GPL(__request_percpu_irq);
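
For per-CPU interrupts the dev_id is a __percpu pointer and each CPU receives its own instance in the handler; enabling is also per CPU. A sketch with hypothetical names, assuming the enable call is made on each CPU (typically from a CPU hotplug callback):

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

struct foo_pcpu {
	unsigned long count;
};

static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu);

/* dev_id is this CPU's foo_pcpu instance. */
static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
{
	struct foo_pcpu *p = dev_id;

	p->count++;
	return IRQ_HANDLED;
}

static int foo_percpu_init(unsigned int irq)
{
	int err;

	err = request_percpu_irq(irq, foo_percpu_handler, "foo", &foo_pcpu);
	if (err)
		return err;

	/* Must run on each CPU that should receive the interrupt. */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}
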
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481 int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2482 const char *name, void __percpu *dev_id)
2483 {
2484 struct irqaction *action;
2485 struct irq_desc *desc;
2486 unsigned long flags;
2487 int retval;
2488
2489 if (!handler)
2490 return -EINVAL;
2491
2492 desc = irq_to_desc(irq);
2493
2494 if (!desc || !irq_settings_can_request(desc) ||
2495 !irq_settings_is_per_cpu_devid(desc) ||
2496 irq_settings_can_autoenable(desc) ||
2497 !irq_supports_nmi(desc))
2498 return -EINVAL;
2499
2500
2501 if (desc->istate & IRQS_NMI)
2502 return -EINVAL;
2503
2504 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2505 if (!action)
2506 return -ENOMEM;
2507
2508 action->handler = handler;
2509 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2510 | IRQF_NOBALANCING;
2511 action->name = name;
2512 action->percpu_dev_id = dev_id;
2513
2514 retval = irq_chip_pm_get(&desc->irq_data);
2515 if (retval < 0)
2516 goto err_out;
2517
2518 retval = __setup_irq(irq, desc, action);
2519 if (retval)
2520 goto err_irq_setup;
2521
2522 raw_spin_lock_irqsave(&desc->lock, flags);
2523 desc->istate |= IRQS_NMI;
2524 raw_spin_unlock_irqrestore(&desc->lock, flags);
2525
2526 return 0;
2527
2528 err_irq_setup:
2529 irq_chip_pm_put(&desc->irq_data);
2530 err_out:
2531 kfree(action);
2532
2533 return retval;
2534 }
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549 int prepare_percpu_nmi(unsigned int irq)
2550 {
2551 unsigned long flags;
2552 struct irq_desc *desc;
2553 int ret = 0;
2554
2555 WARN_ON(preemptible());
2556
2557 desc = irq_get_desc_lock(irq, &flags,
2558 IRQ_GET_DESC_CHECK_PERCPU);
2559 if (!desc)
2560 return -EINVAL;
2561
2562 if (WARN(!(desc->istate & IRQS_NMI),
2563 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
2564 irq)) {
2565 ret = -EINVAL;
2566 goto out;
2567 }
2568
2569 ret = irq_nmi_setup(desc);
2570 if (ret) {
2571 pr_err("Failed to setup NMI delivery: irq %u\n", irq);
2572 goto out;
2573 }
2574
2575 out:
2576 irq_put_desc_unlock(desc, flags);
2577 return ret;
2578 }
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592 void teardown_percpu_nmi(unsigned int irq)
2593 {
2594 unsigned long flags;
2595 struct irq_desc *desc;
2596
2597 WARN_ON(preemptible());
2598
2599 desc = irq_get_desc_lock(irq, &flags,
2600 IRQ_GET_DESC_CHECK_PERCPU);
2601 if (!desc)
2602 return;
2603
2604 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2605 goto out;
2606
2607 irq_nmi_teardown(desc);
2608 out:
2609 irq_put_desc_unlock(desc, flags);
2610 }
2611
2612 int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
2613 bool *state)
2614 {
2615 struct irq_chip *chip;
2616 int err = -EINVAL;
2617
2618 do {
2619 chip = irq_data_get_irq_chip(data);
2620 if (chip->irq_get_irqchip_state)
2621 break;
2622 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2623 data = data->parent_data;
2624 #else
2625 data = NULL;
2626 #endif
2627 } while (data);
2628
2629 if (data)
2630 err = chip->irq_get_irqchip_state(data, which, state);
2631 return err;
2632 }
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2648 bool *state)
2649 {
2650 struct irq_desc *desc;
2651 struct irq_data *data;
2652 unsigned long flags;
2653 int err = -EINVAL;
2654
2655 desc = irq_get_desc_buslock(irq, &flags, 0);
2656 if (!desc)
2657 return err;
2658
2659 data = irq_desc_get_irq_data(desc);
2660
2661 err = __irq_get_irqchip_state(data, which, state);
2662
2663 irq_put_desc_busunlock(desc, flags);
2664 return err;
2665 }
2666 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
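
A hedged sketch of a caller using this to poll the pending state at the chip (the helper name is made up); a non-zero return simply means no chip in the hierarchy implements irq_get_irqchip_state:

#include <linux/interrupt.h>
#include <linux/irq.h>

static bool foo_irq_is_pending(unsigned int irq)
{
	bool pending = false;

	if (irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
		return false;	/* state readout not supported */

	return pending;
}
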
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2681 bool val)
2682 {
2683 struct irq_desc *desc;
2684 struct irq_data *data;
2685 struct irq_chip *chip;
2686 unsigned long flags;
2687 int err = -EINVAL;
2688
2689 desc = irq_get_desc_buslock(irq, &flags, 0);
2690 if (!desc)
2691 return err;
2692
2693 data = irq_desc_get_irq_data(desc);
2694
2695 do {
2696 chip = irq_data_get_irq_chip(data);
2697 if (chip->irq_set_irqchip_state)
2698 break;
2699 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2700 data = data->parent_data;
2701 #else
2702 data = NULL;
2703 #endif
2704 } while (data);
2705
2706 if (data)
2707 err = chip->irq_set_irqchip_state(data, which, val);
2708
2709 irq_put_desc_busunlock(desc, flags);
2710 return err;
2711 }
2712 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);