This source file includes the following definitions.
- irqd_is_setaffinity_pending
- irqd_is_per_cpu
- irqd_can_balance
- irqd_affinity_was_set
- irqd_mark_affinity_was_set
- irqd_trigger_type_was_set
- irqd_get_trigger_type
- irqd_set_trigger_type
- irqd_is_level_type
- irqd_set_single_target
- irqd_is_single_target
- irqd_is_wakeup_set
- irqd_can_move_in_process_context
- irqd_irq_disabled
- irqd_irq_masked
- irqd_irq_inprogress
- irqd_is_wakeup_armed
- irqd_is_forwarded_to_vcpu
- irqd_set_forwarded_to_vcpu
- irqd_clr_forwarded_to_vcpu
- irqd_affinity_is_managed
- irqd_is_activated
- irqd_set_activated
- irqd_clr_activated
- irqd_is_started
- irqd_is_managed_and_shutdown
- irqd_set_can_reserve
- irqd_clr_can_reserve
- irqd_can_reserve
- irqd_set_msi_nomask_quirk
- irqd_clr_msi_nomask_quirk
- irqd_msi_nomask_quirk
- irqd_to_hwirq
- irq_move_irq
- irq_move_irq
- irq_move_masked_irq
- irq_force_complete_move
- irq_set_parent
- irq_set_chip_and_handler
- irq_set_handler
- irq_set_chained_handler
- irq_set_status_flags
- irq_clear_status_flags
- irq_set_noprobe
- irq_set_probe
- irq_set_nothread
- irq_set_thread
- irq_set_nested_thread
- irq_set_percpu_devid_flags
- irq_get_chip
- irq_data_get_irq_chip
- irq_get_chip_data
- irq_data_get_irq_chip_data
- irq_get_handler_data
- irq_data_get_irq_handler_data
- irq_get_msi_desc
- irq_data_get_msi_desc
- irq_get_trigger_type
- irq_common_data_get_node
- irq_data_get_node
- irq_get_affinity_mask
- irq_data_get_affinity_mask
- irq_data_get_effective_affinity_mask
- irq_data_update_effective_affinity
- irq_data_update_effective_affinity
- irq_data_get_effective_affinity_mask
- irq_free_desc
- irq_alloc_hwirq
- irq_free_hwirq
- irq_free_generic_chip
- irq_destroy_generic_chip
- irq_data_get_chip_type
- irq_gc_lock
- irq_gc_unlock
- irq_gc_lock
- irq_gc_unlock
- irq_reg_writel
- irq_reg_readl
1
2 #ifndef _LINUX_IRQ_H
3 #define _LINUX_IRQ_H
4
5
6
7
8
9
10
11
12
13 #include <linux/cache.h>
14 #include <linux/spinlock.h>
15 #include <linux/cpumask.h>
16 #include <linux/irqhandler.h>
17 #include <linux/irqreturn.h>
18 #include <linux/irqnr.h>
19 #include <linux/topology.h>
20 #include <linux/io.h>
21 #include <linux/slab.h>
22
23 #include <asm/irq.h>
24 #include <asm/ptrace.h>
25 #include <asm/irq_regs.h>
26
27 struct seq_file;
28 struct module;
29 struct msi_msg;
30 struct irq_affinity_desc;
31 enum irqchip_irq_state;
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
/*
 * IRQ line status flags.
 *
 * Bits 0-7 hold the trigger type and probe flag; IRQ_TYPE_SENSE_MASK
 * covers the four sense bits.  Bits 8-19 are per-line behaviour flags
 * used via irq_set/clear_status_flags() and irq_modify_status().
 */
enum {
	IRQ_TYPE_NONE		= 0x00000000,	/* default, unspecified trigger */
	IRQ_TYPE_EDGE_RISING	= 0x00000001,	/* rising edge triggered */
	IRQ_TYPE_EDGE_FALLING	= 0x00000002,	/* falling edge triggered */
	IRQ_TYPE_EDGE_BOTH	= (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
	IRQ_TYPE_LEVEL_HIGH	= 0x00000004,	/* level triggered, active high */
	IRQ_TYPE_LEVEL_LOW	= 0x00000008,	/* level triggered, active low */
	IRQ_TYPE_LEVEL_MASK	= (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
	IRQ_TYPE_SENSE_MASK	= 0x0000000f,	/* mask of all the above sense bits */
	IRQ_TYPE_DEFAULT	= IRQ_TYPE_SENSE_MASK,

	IRQ_TYPE_PROBE		= 0x00000010,	/* probing in progress */

	IRQ_LEVEL		= (1 <<  8),	/* interrupt is level triggered */
	IRQ_PER_CPU		= (1 <<  9),	/* interrupt is per cpu */
	IRQ_NOPROBE		= (1 << 10),	/* interrupt cannot be probed by autoprobing */
	IRQ_NOREQUEST		= (1 << 11),	/* interrupt cannot be requested via request_irq() */
	IRQ_NOAUTOEN		= (1 << 12),	/* interrupt is not enabled automatically on request */
	IRQ_NO_BALANCING	= (1 << 13),	/* interrupt is excluded from balancing */
	IRQ_MOVE_PCNTXT		= (1 << 14),	/* interrupt can be migrated from process context */
	IRQ_NESTED_THREAD	= (1 << 15),	/* interrupt nests into another thread */
	IRQ_NOTHREAD		= (1 << 16),	/* interrupt cannot be threaded */
	IRQ_PER_CPU_DEVID	= (1 << 17),	/* dev_id is a per-cpu variable */
	IRQ_IS_POLLED		= (1 << 18),	/* interrupt is always polled */
	IRQ_DISABLE_UNLAZY	= (1 << 19),	/* disable lazy irq disable */
};
101
/* Flags which callers of irq_modify_status() are allowed to change. */
#define IRQF_MODIFY_MASK	\
	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
	 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)

/* Either flag excludes the interrupt from affinity balancing. */
#define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
109
110
111
112
113
114
115
116
117
118
/*
 * Return values for chip->irq_set_affinity():
 *
 * IRQ_SET_MASK_OK        - core updates irq_common_data.affinity itself
 * IRQ_SET_MASK_OK_NOCOPY - chip already updated irq_common_data.affinity
 * IRQ_SET_MASK_OK_DONE   - same as OK for the core; a hierarchy parent
 *                          signals that no further processing is needed
 */
enum {
	IRQ_SET_MASK_OK = 0,
	IRQ_SET_MASK_OK_NOCOPY,
	IRQ_SET_MASK_OK_DONE,
};
124
125 struct msi_desc;
126 struct irq_domain;
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
/**
 * struct irq_common_data - per irq data shared by all irqchips
 * @state_use_accessors: status information for irq chip functions.
 *			Use the irqd_*() accessor functions below to
 *			read/modify the bits - never touch it directly.
 * @node:		node index useful for balancing (NUMA builds only)
 * @handler_data:	per-IRQ data for the irq_chip methods
 * @msi_desc:		MSI descriptor
 * @affinity:		IRQ affinity on SMP
 * @effective_affinity:	the effective IRQ affinity on SMP, subset of
 *			@affinity (only with GENERIC_IRQ_EFFECTIVE_AFF_MASK)
 * @ipi_offset:		offset of the first IPI target cpu in @affinity
 */
struct irq_common_data {
	unsigned int		__private state_use_accessors;
#ifdef CONFIG_NUMA
	unsigned int		node;
#endif
	void			*handler_data;
	struct msi_desc		*msi_desc;
	cpumask_var_t		affinity;
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	cpumask_var_t		effective_affinity;
#endif
#ifdef CONFIG_GENERIC_IRQ_IPI
	unsigned int		ipi_offset;
#endif
};
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
/**
 * struct irq_data - per irq chip data passed down to chip functions
 * @mask:		precomputed bitmask for accessing the chip registers
 * @irq:		interrupt number (the Linux virtual irq)
 * @hwirq:		hardware interrupt number, local to the irq domain
 * @common:		data shared by all irqchips in a hierarchy
 * @chip:		low level interrupt hardware access
 * @domain:		interrupt translation domain, responsible for the
 *			mapping between @hwirq and @irq
 * @parent_data:	pointer to parent struct irq_data in a hierarchical
 *			irq domain (only with IRQ_DOMAIN_HIERARCHY)
 * @chip_data:		platform-specific per-chip private data for the chip
 *			methods, to allow shared chip implementations
 */
struct irq_data {
	u32			mask;
	unsigned int		irq;
	unsigned long		hwirq;
	struct irq_common_data	*common;
	struct irq_chip		*chip;
	struct irq_domain	*domain;
#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
	struct irq_data		*parent_data;
#endif
	void			*chip_data;
};
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
/*
 * Bit masks for irq_common_data.state_use_accessors.  Accessed and
 * modified only through the irqd_*() helpers below.
 *
 * IRQD_TRIGGER_MASK		- mask for the trigger type bits
 * IRQD_SETAFFINITY_PENDING	- affinity setting is pending
 * IRQD_ACTIVATED		- interrupt has already been activated
 * IRQD_NO_BALANCING		- balancing disabled for this IRQ
 * IRQD_PER_CPU			- interrupt is per cpu
 * IRQD_AFFINITY_SET		- interrupt affinity was set
 * IRQD_LEVEL			- interrupt is level triggered
 * IRQD_WAKEUP_STATE		- interrupt is configured for wakeup from
 *				  suspend
 * IRQD_MOVE_PCNTXT		- interrupt can be moved in process context
 * IRQD_IRQ_DISABLED		- disabled state of the interrupt
 * IRQD_IRQ_MASKED		- masked state of the interrupt
 * IRQD_IRQ_INPROGRESS		- in-progress state of the interrupt
 * IRQD_WAKEUP_ARMED		- wakeup mode armed
 * IRQD_FORWARDED_TO_VCPU	- interrupt is forwarded to a VCPU
 * IRQD_AFFINITY_MANAGED	- affinity is auto-managed by the kernel
 * IRQD_IRQ_STARTED		- startup state of the interrupt
 * IRQD_MANAGED_SHUTDOWN	- interrupt was shut down due to an empty
 *				  affinity mask (managed interrupts only)
 * IRQD_SINGLE_TARGET		- IRQ allows only a single affinity target
 * IRQD_DEFAULT_TRIGGER_SET	- expected trigger has already been set
 * IRQD_CAN_RESERVE		- can use reservation mode
 * IRQD_MSI_NOMASK_QUIRK	- non-maskable MSI quirk for affinity changes
 */
enum {
	IRQD_TRIGGER_MASK		= 0xf,
	IRQD_SETAFFINITY_PENDING	= (1 <<  8),
	IRQD_ACTIVATED			= (1 <<  9),
	IRQD_NO_BALANCING		= (1 << 10),
	IRQD_PER_CPU			= (1 << 11),
	IRQD_AFFINITY_SET		= (1 << 12),
	IRQD_LEVEL			= (1 << 13),
	IRQD_WAKEUP_STATE		= (1 << 14),
	IRQD_MOVE_PCNTXT		= (1 << 15),
	IRQD_IRQ_DISABLED		= (1 << 16),
	IRQD_IRQ_MASKED			= (1 << 17),
	IRQD_IRQ_INPROGRESS		= (1 << 18),
	IRQD_WAKEUP_ARMED		= (1 << 19),
	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
	IRQD_AFFINITY_MANAGED		= (1 << 21),
	IRQD_IRQ_STARTED		= (1 << 22),
	IRQD_MANAGED_SHUTDOWN		= (1 << 23),
	IRQD_SINGLE_TARGET		= (1 << 24),
	IRQD_DEFAULT_TRIGGER_SET	= (1 << 25),
	IRQD_CAN_RESERVE		= (1 << 26),
	IRQD_MSI_NOMASK_QUIRK		= (1 << 27),
};
238
/* Internal helper: raw access to the private state word; #undef'd below. */
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)

/* True if an affinity change is pending for this interrupt. */
static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING;
}

/* True if this is a per-cpu interrupt. */
static inline bool irqd_is_per_cpu(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_PER_CPU;
}

/* True if the interrupt may participate in affinity balancing. */
static inline bool irqd_can_balance(struct irq_data *d)
{
	return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING));
}

/* True if the affinity has been explicitly set. */
static inline bool irqd_affinity_was_set(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_AFFINITY_SET;
}

/* Record that the affinity has been explicitly set. */
static inline void irqd_mark_affinity_was_set(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_AFFINITY_SET;
}

/* True if a default trigger type has been established. */
static inline bool irqd_trigger_type_was_set(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET;
}

/* Return the IRQ_TYPE_* trigger bits of the interrupt state. */
static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
}
275
276
277
278
279
280 static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
281 {
282 __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
283 __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
284 __irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET;
285 }
286
/* True if the interrupt is level triggered. */
static inline bool irqd_is_level_type(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_LEVEL;
}

/* Mark the interrupt as allowing only a single affinity target. */
static inline void irqd_set_single_target(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_SINGLE_TARGET;
}

/* True if the interrupt allows only a single affinity target. */
static inline bool irqd_is_single_target(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_SINGLE_TARGET;
}

/* True if the interrupt is configured as a wakeup source. */
static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
}

/* True if the interrupt can be migrated from process context. */
static inline bool irqd_can_move_in_process_context(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
}

/* True if the interrupt is currently disabled. */
static inline bool irqd_irq_disabled(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_DISABLED;
}

/* True if the interrupt is currently masked. */
static inline bool irqd_irq_masked(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_MASKED;
}

/* True if the interrupt is currently being handled. */
static inline bool irqd_irq_inprogress(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS;
}

/* True if the interrupt wakeup mode is armed. */
static inline bool irqd_is_wakeup_armed(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
}

/* True if the interrupt is forwarded to a VCPU. */
static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU;
}

/* Mark the interrupt as forwarded to a VCPU. */
static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU;
}

/* Clear the VCPU-forwarding mark. */
static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
}

/* True if the interrupt affinity is auto-managed by the kernel. */
static inline bool irqd_affinity_is_managed(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
}

/* True if the interrupt has been activated. */
static inline bool irqd_is_activated(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_ACTIVATED;
}

/* Mark the interrupt as activated. */
static inline void irqd_set_activated(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_ACTIVATED;
}

/* Clear the activated mark. */
static inline void irqd_clr_activated(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_ACTIVATED;
}

/* True if the interrupt has been started up. */
static inline bool irqd_is_started(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_STARTED;
}

/* True if a managed interrupt was shut down (empty affinity mask). */
static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
}

/* Allow reservation mode for this interrupt. */
static inline void irqd_set_can_reserve(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_CAN_RESERVE;
}

/* Disallow reservation mode for this interrupt. */
static inline void irqd_clr_can_reserve(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_CAN_RESERVE;
}

/* True if reservation mode is allowed for this interrupt. */
static inline bool irqd_can_reserve(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_CAN_RESERVE;
}

/* Set the non-maskable MSI quirk flag. */
static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
}

/* Clear the non-maskable MSI quirk flag. */
static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
}

/* True if the non-maskable MSI quirk flag is set. */
static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
}

/* State word must not be touched outside of the accessors above. */
#undef __irqd_to_state
412
/* Return the domain-local hardware interrupt number for @d. */
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
	return d->hwirq;
}
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * @parent_device:	pointer to parent device for irqchip
 * @name:		name for /proc/interrupts
 * @irq_startup:	start up the interrupt (defaults to ->enable if NULL)
 * @irq_shutdown:	shut down the interrupt (defaults to ->disable if NULL)
 * @irq_enable:		enable the interrupt (defaults to chip->unmask if NULL)
 * @irq_disable:	disable the interrupt
 * @irq_ack:		start of a new interrupt
 * @irq_mask:		mask an interrupt source
 * @irq_mask_ack:	ack and mask an interrupt source
 * @irq_unmask:		unmask an interrupt source
 * @irq_eoi:		end of interrupt
 * @irq_set_affinity:	set the CPU affinity on SMP machines
 * @irq_retrigger:	resend an IRQ to the CPU
 * @irq_set_type:	set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @irq_set_wake:	enable/disable power-management wake-on of an IRQ
 * @irq_bus_lock:	function to lock access to slow bus (i2c) chips
 * @irq_bus_sync_unlock: function to sync and unlock slow bus (i2c) chips
 * @irq_cpu_online:	configure an interrupt source for a secondary CPU
 * @irq_cpu_offline:	un-configure an interrupt source for a secondary CPU
 * @irq_suspend:	called on suspend of the wakeup-disabled interrupts
 * @irq_resume:		called on system resume
 * @irq_pm_shutdown:	called on system power-off / shutdown
 * @irq_calc_mask:	optional function to recalculate the chip bitmask
 * @irq_print_chip:	optional to print special chip info in show_interrupts
 * @irq_request_resources: optional to request resources before calling
 *			any other callback related to this irq
 * @irq_release_resources: optional to release resources acquired with
 *			irq_request_resources
 * @irq_compose_msi_msg: optional to compose message content for MSI
 * @irq_write_msi_msg:	optional to write message content for MSI
 * @irq_get_irqchip_state: return the internal state of an interrupt
 * @irq_set_irqchip_state: set the internal state of an interrupt
 * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine
 * @ipi_send_single:	send a single IPI to destination cpu
 * @ipi_send_mask:	send an IPI to destination cpus in cpumask
 * @irq_nmi_setup:	function called from core code before enabling an NMI
 * @irq_nmi_teardown:	function called from core code after disabling an NMI
 * @flags:		chip specific flags (IRQCHIP_* below)
 */
struct irq_chip {
	struct device	*parent_device;
	const char	*name;
	unsigned int	(*irq_startup)(struct irq_data *data);
	void		(*irq_shutdown)(struct irq_data *data);
	void		(*irq_enable)(struct irq_data *data);
	void		(*irq_disable)(struct irq_data *data);

	void		(*irq_ack)(struct irq_data *data);
	void		(*irq_mask)(struct irq_data *data);
	void		(*irq_mask_ack)(struct irq_data *data);
	void		(*irq_unmask)(struct irq_data *data);
	void		(*irq_eoi)(struct irq_data *data);

	int		(*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
	int		(*irq_retrigger)(struct irq_data *data);
	int		(*irq_set_type)(struct irq_data *data, unsigned int flow_type);
	int		(*irq_set_wake)(struct irq_data *data, unsigned int on);

	void		(*irq_bus_lock)(struct irq_data *data);
	void		(*irq_bus_sync_unlock)(struct irq_data *data);

	void		(*irq_cpu_online)(struct irq_data *data);
	void		(*irq_cpu_offline)(struct irq_data *data);

	void		(*irq_suspend)(struct irq_data *data);
	void		(*irq_resume)(struct irq_data *data);
	void		(*irq_pm_shutdown)(struct irq_data *data);

	void		(*irq_calc_mask)(struct irq_data *data);

	void		(*irq_print_chip)(struct irq_data *data, struct seq_file *p);
	int		(*irq_request_resources)(struct irq_data *data);
	void		(*irq_release_resources)(struct irq_data *data);

	void		(*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
	void		(*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);

	int		(*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
	int		(*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state);

	int		(*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);

	void		(*ipi_send_single)(struct irq_data *data, unsigned int cpu);
	void		(*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);

	int		(*irq_nmi_setup)(struct irq_data *data);
	void		(*irq_nmi_teardown)(struct irq_data *data);

	unsigned long	flags;
};
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
/*
 * irq_chip specific flags:
 *
 * IRQCHIP_SET_TYPE_MASKED:	Must mask before calling chip.irq_set_type()
 * IRQCHIP_EOI_IF_HANDLED:	Only issue irq_eoi() when irq was handled
 * IRQCHIP_MASK_ON_SUSPEND:	Mask non-wake irqs in the suspend path
 * IRQCHIP_ONOFFLINE_ENABLED:	Only invoke irq_cpu_online/offline() on
 *				enabled irqs
 * IRQCHIP_SKIP_SET_WAKE:	Skip chip.irq_set_wake() for this chip
 * IRQCHIP_ONESHOT_SAFE:	One-shot does not require mask/unmask
 * IRQCHIP_EOI_THREADED:	Chip requires eoi() on unmask in threaded mode
 * IRQCHIP_SUPPORTS_LEVEL_MSI:	Chip can provide two doorbells for level MSIs
 * IRQCHIP_SUPPORTS_NMI:	Chip can deliver NMIs (root irqchips only)
 */
enum {
	IRQCHIP_SET_TYPE_MASKED		= (1 << 0),
	IRQCHIP_EOI_IF_HANDLED		= (1 << 1),
	IRQCHIP_MASK_ON_SUSPEND		= (1 << 2),
	IRQCHIP_ONOFFLINE_ENABLED	= (1 << 3),
	IRQCHIP_SKIP_SET_WAKE		= (1 << 4),
	IRQCHIP_ONESHOT_SAFE		= (1 << 5),
	IRQCHIP_EOI_THREADED		= (1 << 6),
	IRQCHIP_SUPPORTS_LEVEL_MSI	= (1 << 7),
	IRQCHIP_SUPPORTS_NMI		= (1 << 8),
};
544
545 #include <linux/irqdesc.h>
546
547
548
549
550 #include <asm/hw_irq.h>
551
552 #ifndef NR_IRQS_LEGACY
553 # define NR_IRQS_LEGACY 0
554 #endif
555
556 #ifndef ARCH_IRQ_INIT_FLAGS
557 # define ARCH_IRQ_INIT_FLAGS 0
558 #endif
559
560 #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS
561
562 struct irqaction;
563 extern int setup_irq(unsigned int irq, struct irqaction *new);
564 extern void remove_irq(unsigned int irq, struct irqaction *act);
565 extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
566 extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
567
568 extern void irq_cpu_online(void);
569 extern void irq_cpu_offline(void);
570 extern int irq_set_affinity_locked(struct irq_data *data,
571 const struct cpumask *cpumask, bool force);
572 extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
573
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void __irq_move_irq(struct irq_data *data);

/*
 * Handle a pending interrupt migration.  The inline fast path only
 * takes the out-of-line call when an affinity change is actually
 * pending for @data.
 */
static inline void irq_move_irq(struct irq_data *data)
{
	if (unlikely(irqd_is_setaffinity_pending(data)))
		__irq_move_irq(data);
}
void irq_move_masked_irq(struct irq_data *data);
void irq_force_complete_move(struct irq_desc *desc);
#else
/* No generic pending-IRQ support: migration helpers are no-ops. */
static inline void irq_move_irq(struct irq_data *data) { }
static inline void irq_move_masked_irq(struct irq_data *data) { }
static inline void irq_force_complete_move(struct irq_desc *desc) { }
#endif
595
596 extern int no_irq_affinity;
597
#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq);
#else
/* Without software resend support this is a no-op that reports success. */
static inline int irq_set_parent(int irq, int parent_irq)
{
	return 0;
}
#endif
606
607
608
609
610
611 extern void handle_level_irq(struct irq_desc *desc);
612 extern void handle_fasteoi_irq(struct irq_desc *desc);
613 extern void handle_edge_irq(struct irq_desc *desc);
614 extern void handle_edge_eoi_irq(struct irq_desc *desc);
615 extern void handle_simple_irq(struct irq_desc *desc);
616 extern void handle_untracked_irq(struct irq_desc *desc);
617 extern void handle_percpu_irq(struct irq_desc *desc);
618 extern void handle_percpu_devid_irq(struct irq_desc *desc);
619 extern void handle_bad_irq(struct irq_desc *desc);
620 extern void handle_nested_irq(unsigned int irq);
621
622 extern void handle_fasteoi_nmi(struct irq_desc *desc);
623 extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc);
624
625 extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
626 extern int irq_chip_pm_get(struct irq_data *data);
627 extern int irq_chip_pm_put(struct irq_data *data);
628 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
629 extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
630 extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
631 extern void irq_chip_enable_parent(struct irq_data *data);
632 extern void irq_chip_disable_parent(struct irq_data *data);
633 extern void irq_chip_ack_parent(struct irq_data *data);
634 extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
635 extern void irq_chip_mask_parent(struct irq_data *data);
636 extern void irq_chip_mask_ack_parent(struct irq_data *data);
637 extern void irq_chip_unmask_parent(struct irq_data *data);
638 extern void irq_chip_eoi_parent(struct irq_data *data);
639 extern int irq_chip_set_affinity_parent(struct irq_data *data,
640 const struct cpumask *dest,
641 bool force);
642 extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
643 extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
644 void *vcpu_info);
645 extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
646 extern int irq_chip_request_resources_parent(struct irq_data *data);
647 extern void irq_chip_release_resources_parent(struct irq_data *data);
648 #endif
649
650
651 extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
652
653
654
655 extern int noirqdebug_setup(char *str);
656
657
658 extern int can_request_irq(unsigned int irq, unsigned long irqflags);
659
660
661 extern struct irq_chip no_irq_chip;
662 extern struct irq_chip dummy_irq_chip;
663
664 extern void
665 irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
666 irq_flow_handler_t handle, const char *name);
667
/* Set chip and flow handler for @irq without assigning a name. */
static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
					    irq_flow_handler_t handle)
{
	irq_set_chip_and_handler_name(irq, chip, handle, NULL);
}
673
674 extern int irq_set_percpu_devid(unsigned int irq);
675 extern int irq_set_percpu_devid_partition(unsigned int irq,
676 const struct cpumask *affinity);
677 extern int irq_get_percpu_devid_partition(unsigned int irq,
678 struct cpumask *affinity);
679
680 extern void
681 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
682 const char *name);
683
/* Install a non-chained flow handler for @irq. */
static inline void
irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__irq_set_handler(irq, handle, 0, NULL);
}
689
690
691
692
693
694
/*
 * Install a chained flow handler for @irq (demultiplexing parent
 * interrupt); the is_chained flag tells the core this irq has no
 * action and must not be requested via request_irq().
 */
static inline void
irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__irq_set_handler(irq, handle, 1, NULL);
}
700
701
702
703
704
705
706 void
707 irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
708 void *data);
709
710 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
711
/* Set the given IRQ_* status flags on @irq. */
static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
{
	irq_modify_status(irq, 0, set);
}

/* Clear the given IRQ_* status flags on @irq. */
static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
{
	irq_modify_status(irq, clr, 0);
}

/* Exclude @irq from autoprobing. */
static inline void irq_set_noprobe(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOPROBE);
}

/* Allow @irq to be autoprobed. */
static inline void irq_set_probe(unsigned int irq)
{
	irq_modify_status(irq, IRQ_NOPROBE, 0);
}

/* Forbid threading of @irq's handler. */
static inline void irq_set_nothread(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOTHREAD);
}

/* Allow threading of @irq's handler. */
static inline void irq_set_thread(unsigned int irq)
{
	irq_modify_status(irq, IRQ_NOTHREAD, 0);
}

/* Mark or unmark @irq as nesting into another thread context. */
static inline void irq_set_nested_thread(unsigned int irq, bool nest)
{
	if (nest)
		irq_set_status_flags(irq, IRQ_NESTED_THREAD);
	else
		irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
}

/* Apply the standard flag set for per-cpu device-id interrupts. */
static inline void irq_set_percpu_devid_flags(unsigned int irq)
{
	irq_set_status_flags(irq,
			     IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
			     IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
}
756
757
758 extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
759 extern int irq_set_handler_data(unsigned int irq, void *data);
760 extern int irq_set_chip_data(unsigned int irq, void *data);
761 extern int irq_set_irq_type(unsigned int irq, unsigned int type);
762 extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
763 extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
764 struct msi_desc *entry);
765 extern struct irq_data *irq_get_irq_data(unsigned int irq);
766
/* Return the irq_chip of @irq, or NULL if the irq number is invalid. */
static inline struct irq_chip *irq_get_chip(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->chip : NULL;
}

/* Return the irq_chip of @d (caller guarantees @d is valid). */
static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
{
	return d->chip;
}

/* Return the chip data of @irq, or NULL if the irq number is invalid. */
static inline void *irq_get_chip_data(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->chip_data : NULL;
}

/* Return the chip data of @d. */
static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
{
	return d->chip_data;
}

/* Return the handler data of @irq, or NULL if the irq number is invalid. */
static inline void *irq_get_handler_data(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->common->handler_data : NULL;
}

/* Return the handler data of @d. */
static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
{
	return d->common->handler_data;
}

/* Return the MSI descriptor of @irq, or NULL if the irq number is invalid. */
static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->common->msi_desc : NULL;
}

/* Return the MSI descriptor of @d. */
static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
{
	return d->common->msi_desc;
}

/* Return the trigger type of @irq, or 0 if the irq number is invalid. */
static inline u32 irq_get_trigger_type(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? irqd_get_trigger_type(d) : 0;
}

/* Return the NUMA node of the common data; 0 on non-NUMA builds. */
static inline int irq_common_data_get_node(struct irq_common_data *d)
{
#ifdef CONFIG_NUMA
	return d->node;
#else
	return 0;
#endif
}

/* Return the NUMA node associated with @d. */
static inline int irq_data_get_node(struct irq_data *d)
{
	return irq_common_data_get_node(d->common);
}

/* Return the affinity mask of @irq, or NULL if the irq number is invalid. */
static inline struct cpumask *irq_get_affinity_mask(int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	return d ? d->common->affinity : NULL;
}

/* Return the affinity mask of @d. */
static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
	return d->common->affinity;
}
842
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
/* Return the effective affinity mask (subset of the requested mask). */
static inline
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
	return d->common->effective_affinity;
}
/* Record the effective affinity mask computed by the irq chip. */
static inline void irq_data_update_effective_affinity(struct irq_data *d,
						      const struct cpumask *m)
{
	cpumask_copy(d->common->effective_affinity, m);
}
#else
/* No separate effective mask: updates are no-ops ... */
static inline void irq_data_update_effective_affinity(struct irq_data *d,
						      const struct cpumask *m)
{
}
/* ... and reads fall back to the plain affinity mask. */
static inline
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
	return d->common->affinity;
}
#endif
865
866 unsigned int arch_dynirq_lower_bound(unsigned int from);
867
868 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
869 struct module *owner,
870 const struct irq_affinity_desc *affinity);
871
872 int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
873 unsigned int cnt, int node, struct module *owner,
874 const struct irq_affinity_desc *affinity);
875
876
877 #define irq_alloc_descs(irq, from, cnt, node) \
878 __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
879
880 #define irq_alloc_desc(node) \
881 irq_alloc_descs(-1, 0, 1, node)
882
883 #define irq_alloc_desc_at(at, node) \
884 irq_alloc_descs(at, at, 1, node)
885
886 #define irq_alloc_desc_from(from, node) \
887 irq_alloc_descs(-1, from, 1, node)
888
889 #define irq_alloc_descs_from(from, cnt, node) \
890 irq_alloc_descs(-1, from, cnt, node)
891
892 #define devm_irq_alloc_descs(dev, irq, from, cnt, node) \
893 __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)
894
895 #define devm_irq_alloc_desc(dev, node) \
896 devm_irq_alloc_descs(dev, -1, 0, 1, node)
897
898 #define devm_irq_alloc_desc_at(dev, at, node) \
899 devm_irq_alloc_descs(dev, at, at, 1, node)
900
901 #define devm_irq_alloc_desc_from(dev, from, node) \
902 devm_irq_alloc_descs(dev, -1, from, 1, node)
903
904 #define devm_irq_alloc_descs_from(dev, from, cnt, node) \
905 devm_irq_alloc_descs(dev, -1, from, cnt, node)
906
907 void irq_free_descs(unsigned int irq, unsigned int cnt);
/* Free a single interrupt descriptor. */
static inline void irq_free_desc(unsigned int irq)
{
	irq_free_descs(irq, 1);
}
912
#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
unsigned int irq_alloc_hwirqs(int cnt, int node);

/* Allocate a single hardware interrupt number on @node. */
static inline unsigned int irq_alloc_hwirq(int node)
{
	return irq_alloc_hwirqs(1, node);
}

void irq_free_hwirqs(unsigned int from, int cnt);

/* Free a single hardware interrupt number. */
static inline void irq_free_hwirq(unsigned int irq)
{
	/*
	 * Plain call instead of "return irq_free_hwirqs(...)": a return
	 * statement with an expression is not permitted in a function
	 * returning void (C11 6.8.6.4).
	 */
	irq_free_hwirqs(irq, 1);
}

int arch_setup_hwirq(unsigned int irq, int node);
void arch_teardown_hwirq(unsigned int irq);
#endif
927
928 #ifdef CONFIG_GENERIC_IRQ_LEGACY
929 void irq_init_desc(unsigned int irq);
930 #endif
931
932
933
934
935
936
937
938
939
940
941
/**
 * struct irq_chip_regs - register offsets for struct irq_chip_generic
 * @enable:	enable register offset
 * @disable:	disable register offset
 * @mask:	mask register offset
 * @ack:	ack register offset
 * @eoi:	eoi register offset
 * @type:	type register offset
 * @polarity:	polarity register offset
 */
struct irq_chip_regs {
	unsigned long		enable;
	unsigned long		disable;
	unsigned long		mask;
	unsigned long		ack;
	unsigned long		eoi;
	unsigned long		type;
	unsigned long		polarity;
};
951
952
953
954
955
956
957
958
959
960
961
962
963
964
/**
 * struct irq_chip_type - generic interrupt chip instance for chip variants
 * @chip:		the (multiplexed) irq chip
 * @regs:		register offsets for this chip type
 * @handler:		flow handler associated with this chip type
 * @type:		chip can handle these flow types
 * @mask_cache_priv:	cached mask register, private to this chip type
 * @mask_cache:		pointer to the cached mask register in use (may be
 *			shared or point at @mask_cache_priv)
 */
struct irq_chip_type {
	struct irq_chip		chip;
	struct irq_chip_regs	regs;
	irq_flow_handler_t	handler;
	u32			type;
	u32			mask_cache_priv;
	u32			*mask_cache;
};
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007 struct irq_chip_generic {
1008 raw_spinlock_t lock;
1009 void __iomem *reg_base;
1010 u32 (*reg_readl)(void __iomem *addr);
1011 void (*reg_writel)(u32 val, void __iomem *addr);
1012 void (*suspend)(struct irq_chip_generic *gc);
1013 void (*resume)(struct irq_chip_generic *gc);
1014 unsigned int irq_base;
1015 unsigned int irq_cnt;
1016 u32 mask_cache;
1017 u32 type_cache;
1018 u32 polarity_cache;
1019 u32 wake_enabled;
1020 u32 wake_active;
1021 unsigned int num_ct;
1022 void *private;
1023 unsigned long installed;
1024 unsigned long unused;
1025 struct irq_domain *domain;
1026 struct list_head list;
1027 struct irq_chip_type chip_types[0];
1028 };
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
/**
 * enum irq_gc_flags - initialization flags for generic irq chips
 * @IRQ_GC_INIT_MASK_CACHE:	initialize the mask_cache by reading mask reg
 * @IRQ_GC_INIT_NESTED_LOCK:	set the lock class of the irqs to nested for
 *				irq chips which need to call irq_set_wake()
 *				on the parent irq
 * @IRQ_GC_MASK_CACHE_PER_TYPE:	mask cache is chip-type private
 * @IRQ_GC_NO_MASK:		do not calculate irq_data->mask
 * @IRQ_GC_BE_IO:		use big-endian register accesses (default: LE)
 */
enum irq_gc_flags {
	IRQ_GC_INIT_MASK_CACHE		= 1 << 0,
	IRQ_GC_INIT_NESTED_LOCK		= 1 << 1,
	IRQ_GC_MASK_CACHE_PER_TYPE	= 1 << 2,
	IRQ_GC_NO_MASK			= 1 << 3,
	IRQ_GC_BE_IO			= 1 << 4,
};
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057 struct irq_domain_chip_generic {
1058 unsigned int irqs_per_chip;
1059 unsigned int num_chips;
1060 unsigned int irq_flags_to_clear;
1061 unsigned int irq_flags_to_set;
1062 enum irq_gc_flags gc_flags;
1063 struct irq_chip_generic *gc[0];
1064 };
1065
1066
1067 void irq_gc_noop(struct irq_data *d);
1068 void irq_gc_mask_disable_reg(struct irq_data *d);
1069 void irq_gc_mask_set_bit(struct irq_data *d);
1070 void irq_gc_mask_clr_bit(struct irq_data *d);
1071 void irq_gc_unmask_enable_reg(struct irq_data *d);
1072 void irq_gc_ack_set_bit(struct irq_data *d);
1073 void irq_gc_ack_clr_bit(struct irq_data *d);
1074 void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
1075 void irq_gc_eoi(struct irq_data *d);
1076 int irq_gc_set_wake(struct irq_data *d, unsigned int on);
1077
1078
1079 int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
1080 irq_hw_number_t hw_irq);
1081 struct irq_chip_generic *
1082 irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
1083 void __iomem *reg_base, irq_flow_handler_t handler);
1084 void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
1085 enum irq_gc_flags flags, unsigned int clr,
1086 unsigned int set);
1087 int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
1088 void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
1089 unsigned int clr, unsigned int set);
1090
1091 struct irq_chip_generic *
1092 devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct,
1093 unsigned int irq_base, void __iomem *reg_base,
1094 irq_flow_handler_t handler);
1095 int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc,
1096 u32 msk, enum irq_gc_flags flags,
1097 unsigned int clr, unsigned int set);
1098
1099 struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
1100
1101 int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
1102 int num_ct, const char *name,
1103 irq_flow_handler_t handler,
1104 unsigned int clr, unsigned int set,
1105 enum irq_gc_flags flags);
1106
1107 #define irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name, \
1108 handler, clr, set, flags) \
1109 ({ \
1110 MAYBE_BUILD_BUG_ON(irqs_per_chip > 32); \
1111 __irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,\
1112 handler, clr, set, flags); \
1113 })
1114
/* Release the memory of a generic chip allocated with irq_alloc_generic_chip(). */
static inline void irq_free_generic_chip(struct irq_chip_generic *gc)
{
	kfree(gc);
}

/* Remove a generic chip from the linear irq domain and free it. */
static inline void irq_destroy_generic_chip(struct irq_chip_generic *gc,
					    u32 msk, unsigned int clr,
					    unsigned int set)
{
	irq_remove_generic_chip(gc, msk, clr, set);
	irq_free_generic_chip(gc);
}

/* Return the irq_chip_type embedding the chip referenced by @d. */
static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
{
	return container_of(d->chip, struct irq_chip_type, chip);
}
1132
/*
 * Build a mask with the low @n bits set (all bits for n >= 32).
 * Uses 1U so that n == 31 does not shift into the sign bit of a
 * signed int, which is undefined behaviour (C11 6.5.7).
 */
#define IRQ_MSK(n) (u32)((n) < 32 ? ((1U << (n)) - 1) : UINT_MAX)
1134
#ifdef CONFIG_SMP
/* Serialize access to the generic chip's registers and caches. */
static inline void irq_gc_lock(struct irq_chip_generic *gc)
{
	raw_spin_lock(&gc->lock);
}

/* Release the generic chip lock taken with irq_gc_lock(). */
static inline void irq_gc_unlock(struct irq_chip_generic *gc)
{
	raw_spin_unlock(&gc->lock);
}
#else
/* Uniprocessor build: the lock degrades to a no-op. */
static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
1149
1150
1151
1152
1153
/*
 * irqsave variants of the generic chip lock, for use in code which is
 * not called from the chip's own flow handler context.
 */
#define irq_gc_lock_irqsave(gc, flags)	\
	raw_spin_lock_irqsave(&(gc)->lock, flags)

#define irq_gc_unlock_irqrestore(gc, flags)	\
	raw_spin_unlock_irqrestore(&(gc)->lock, flags)
1159
1160 static inline void irq_reg_writel(struct irq_chip_generic *gc,
1161 u32 val, int reg_offset)
1162 {
1163 if (gc->reg_writel)
1164 gc->reg_writel(val, gc->reg_base + reg_offset);
1165 else
1166 writel(val, gc->reg_base + reg_offset);
1167 }
1168
1169 static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
1170 int reg_offset)
1171 {
1172 if (gc->reg_readl)
1173 return gc->reg_readl(gc->reg_base + reg_offset);
1174 else
1175 return readl(gc->reg_base + reg_offset);
1176 }
1177
1178 struct irq_matrix;
1179 struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
1180 unsigned int alloc_start,
1181 unsigned int alloc_end);
1182 void irq_matrix_online(struct irq_matrix *m);
1183 void irq_matrix_offline(struct irq_matrix *m);
1184 void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
1185 int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
1186 void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
1187 int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
1188 unsigned int *mapped_cpu);
1189 void irq_matrix_reserve(struct irq_matrix *m);
1190 void irq_matrix_remove_reserved(struct irq_matrix *m);
1191 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
1192 bool reserved, unsigned int *mapped_cpu);
1193 void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
1194 unsigned int bit, bool managed);
1195 void irq_matrix_assign(struct irq_matrix *m, unsigned int bit);
1196 unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown);
1197 unsigned int irq_matrix_allocated(struct irq_matrix *m);
1198 unsigned int irq_matrix_reserved(struct irq_matrix *m);
1199 void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind);
1200
1201
1202 #define INVALID_HWIRQ (~0UL)
1203 irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
1204 int __ipi_send_single(struct irq_desc *desc, unsigned int cpu);
1205 int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
1206 int ipi_send_single(unsigned int virq, unsigned int cpu);
1207 int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
1208
1209 #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
1210
1211
1212
1213
1214
1215
1216
1217
1218 int __init set_handle_irq(void (*handle_irq)(struct pt_regs *));
1219
1220
1221
1222
1223
1224 extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
1225 #endif
1226
1227 #endif