Lines Matching refs: cfg
49 struct irq_cfg *cfg; in alloc_irq_cfg() local
51 cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node); in alloc_irq_cfg()
52 if (!cfg) in alloc_irq_cfg()
54 if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node)) in alloc_irq_cfg()
56 if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node)) in alloc_irq_cfg()
59 INIT_LIST_HEAD(&cfg->irq_2_pin); in alloc_irq_cfg()
61 return cfg; in alloc_irq_cfg()
63 free_cpumask_var(cfg->domain); in alloc_irq_cfg()
65 kfree(cfg); in alloc_irq_cfg()
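The fragments above (source lines 49-65) are the constructor for the per-interrupt irq_cfg structure; the function names throughout this listing are consistent with the x86 APIC vector-management code of the 3.19-4.2 kernel era (arch/x86/kernel/apic/vector.c). Reassembled, and assuming the kernel's usual goto-unwind error path (the signature and label names are inferred, not shown in the matches), the allocator reads roughly as:

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
        struct irq_cfg *cfg;

        /* Node-local, zeroed allocation of the config block itself. */
        cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
        if (!cfg)
                return NULL;
        /*
         * Two cpumasks: the vector's current CPU domain, and the old
         * domain kept around while a migration is in flight.
         */
        if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
                goto out_cfg;
        if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
                goto out_domain;
        INIT_LIST_HEAD(&cfg->irq_2_pin);
        return cfg;
out_domain:
        free_cpumask_var(cfg->domain);
out_cfg:
        kfree(cfg);
        return NULL;
}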
72 struct irq_cfg *cfg; in alloc_irq_and_cfg_at() local
77 cfg = irq_cfg(at); in alloc_irq_and_cfg_at()
78 if (cfg) in alloc_irq_and_cfg_at()
79 return cfg; in alloc_irq_and_cfg_at()
82 cfg = alloc_irq_cfg(at, node); in alloc_irq_and_cfg_at()
83 if (cfg) in alloc_irq_and_cfg_at()
84 irq_set_chip_data(at, cfg); in alloc_irq_and_cfg_at()
87 return cfg; in alloc_irq_and_cfg_at()
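alloc_irq_and_cfg_at() (source lines 72-87) reuses an existing config if one is already installed at the requested irq number and only allocates otherwise. A sketch; the irq_alloc_desc_at()/irq_free_desc() steps around the visible fragments are inferred:

struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
        int res = irq_alloc_desc_at(at, node);  /* inferred: reserve the descriptor */
        struct irq_cfg *cfg;

        if (res < 0) {
                if (res != -EEXIST)
                        return NULL;
                /* Descriptor already exists: reuse its config if set. */
                cfg = irq_cfg(at);
                if (cfg)
                        return cfg;
        }

        cfg = alloc_irq_cfg(at, node);
        if (cfg)
                irq_set_chip_data(at, cfg);     /* publish as the irq's chip data */
        else
                irq_free_desc(at);              /* inferred: undo the reservation */
        return cfg;
}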
90 static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) in free_irq_cfg() argument
92 if (!cfg) in free_irq_cfg()
95 free_cpumask_var(cfg->domain); in free_irq_cfg()
96 free_cpumask_var(cfg->old_domain); in free_irq_cfg()
97 kfree(cfg); in free_irq_cfg()
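The destructor (source lines 90-97) mirrors the allocator; detaching the chip data before freeing is inferred:

static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
{
        if (!cfg)
                return;
        irq_set_chip_data(at, NULL);    /* inferred: unhook before freeing */
        free_cpumask_var(cfg->domain);
        free_cpumask_var(cfg->old_domain);
        kfree(cfg);
}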
101 __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) in __assign_irq_vector() argument
119 if (cfg->move_in_progress) in __assign_irq_vector()
127 cpumask_clear(cfg->old_domain); in __assign_irq_vector()
134 if (cpumask_subset(tmp_mask, cfg->domain)) { in __assign_irq_vector()
136 if (cpumask_equal(tmp_mask, cfg->domain)) in __assign_irq_vector()
143 cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask); in __assign_irq_vector()
144 cfg->move_in_progress = in __assign_irq_vector()
145 cpumask_intersects(cfg->old_domain, cpu_online_mask); in __assign_irq_vector()
146 cpumask_and(cfg->domain, cfg->domain, tmp_mask); in __assign_irq_vector()
160 cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask); in __assign_irq_vector()
161 cpumask_andnot(tmp_mask, mask, cfg->old_domain); in __assign_irq_vector()
177 if (cfg->vector) { in __assign_irq_vector()
178 cpumask_copy(cfg->old_domain, cfg->domain); in __assign_irq_vector()
179 cfg->move_in_progress = in __assign_irq_vector()
180 cpumask_intersects(cfg->old_domain, cpu_online_mask); in __assign_irq_vector()
184 cfg->vector = vector; in __assign_irq_vector()
185 cpumask_copy(cfg->domain, tmp_mask); in __assign_irq_vector()
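__assign_irq_vector() (source lines 101-185) is the core allocator. It walks the CPUs of the requested mask, asks the APIC driver which CPUs must share the vector (the allocation domain), and searches for a vector number free on every CPU of that domain, stepping by 16 so consecutive allocations land in different APIC priority classes. If the new domain is a subset of the current one it merely shrinks the domain; if the irq already had a vector, the old domain is parked in old_domain and move_in_progress is set so the cleanup IPI can release it later. A sketch that fills in the search loop around the visible fragments from the usual implementation of this era:

static int __assign_irq_vector(int irq, struct irq_cfg *cfg,
                               const struct cpumask *mask)
{
        static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
        static int current_offset = VECTOR_OFFSET_START % 16;
        cpumask_var_t tmp_mask;
        int cpu, err;

        /* An earlier move must finish before the vector can change again. */
        if (cfg->move_in_progress)
                return -EBUSY;
        if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
                return -ENOMEM;

        err = -ENOSPC;
        cpumask_clear(cfg->old_domain);
        cpu = cpumask_first_and(mask, cpu_online_mask);
        while (cpu < nr_cpu_ids) {
                int new_cpu, vector, offset;

                /* Ask the APIC driver which CPUs must share this vector. */
                apic->vector_allocation_domain(cpu, tmp_mask, mask);

                if (cpumask_subset(tmp_mask, cfg->domain)) {
                        err = 0;
                        if (cpumask_equal(tmp_mask, cfg->domain))
                                break;
                        /*
                         * Shrinking the domain: hand the CPUs that fall
                         * out of it to the cleanup machinery.
                         */
                        cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
                        cfg->move_in_progress =
                           cpumask_intersects(cfg->old_domain, cpu_online_mask);
                        cpumask_and(cfg->domain, cfg->domain, tmp_mask);
                        break;
                }

                vector = current_vector;
                offset = current_offset;
next:
                /* Step by 16 to spread irqs across APIC priority levels. */
                vector += 16;
                if (vector >= first_system_vector) {
                        offset = (offset + 1) % 16;
                        vector = FIRST_EXTERNAL_VECTOR + offset;
                }
                if (unlikely(current_vector == vector)) {
                        /*
                         * Wrapped around: nothing free on this domain, so
                         * retry on the remaining CPUs of the requested mask.
                         */
                        cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
                        cpumask_andnot(tmp_mask, mask, cfg->old_domain);
                        cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
                        continue;
                }
                if (test_bit(vector, used_vectors))
                        goto next;
                for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
                        if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED)
                                goto next;
                /* Found a vector free on every CPU of the domain. */
                current_vector = vector;
                current_offset = offset;
                if (cfg->vector) {
                        /* Moving a live irq: remember where it used to be. */
                        cpumask_copy(cfg->old_domain, cfg->domain);
                        cfg->move_in_progress =
                           cpumask_intersects(cfg->old_domain, cpu_online_mask);
                }
                for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
                        per_cpu(vector_irq, new_cpu)[vector] = irq;
                cfg->vector = vector;
                cpumask_copy(cfg->domain, tmp_mask);
                err = 0;
                break;
        }
        free_cpumask_var(tmp_mask);
        return err;
}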
194 int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) in assign_irq_vector() argument
200 err = __assign_irq_vector(irq, cfg, mask); in assign_irq_vector()
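assign_irq_vector() (source lines 194-200) is just the locked wrapper around the helper above; the vector_lock usage is inferred from the double-underscore naming convention:

int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
        unsigned long flags;
        int err;

        raw_spin_lock_irqsave(&vector_lock, flags);
        err = __assign_irq_vector(irq, cfg, mask);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return err;
}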
205 void clear_irq_vector(int irq, struct irq_cfg *cfg) in clear_irq_vector() argument
211 BUG_ON(!cfg->vector); in clear_irq_vector()
213 vector = cfg->vector; in clear_irq_vector()
214 for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) in clear_irq_vector()
217 cfg->vector = 0; in clear_irq_vector()
218 cpumask_clear(cfg->domain); in clear_irq_vector()
220 if (likely(!cfg->move_in_progress)) { in clear_irq_vector()
225 for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { in clear_irq_vector()
234 cfg->move_in_progress = 0; in clear_irq_vector()
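clear_irq_vector() (source lines 205-234) undoes an assignment: it clears the per-cpu vector_irq slots across the current domain and, if a move was still in flight, also scavenges the stale slots left on old_domain. A sketch; the locking and the VECTOR_UNDEFINED sentinel are inferred:

void clear_irq_vector(int irq, struct irq_cfg *cfg)
{
        int cpu, vector;
        unsigned long flags;

        raw_spin_lock_irqsave(&vector_lock, flags);
        BUG_ON(!cfg->vector);

        /* Release the vector on every online CPU of the current domain. */
        vector = cfg->vector;
        for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
                per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;

        cfg->vector = 0;
        cpumask_clear(cfg->domain);

        if (likely(!cfg->move_in_progress)) {
                raw_spin_unlock_irqrestore(&vector_lock, flags);
                return;
        }

        /* A move was pending: scrub whatever slot the irq still occupies
         * on the CPUs of the old domain. */
        for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
                for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
                     vector++) {
                        if (per_cpu(vector_irq, cpu)[vector] != irq)
                                continue;
                        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
                        break;
                }
        }
        cfg->move_in_progress = 0;
        raw_spin_unlock_irqrestore(&vector_lock, flags);
}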
270 struct irq_cfg *cfg; in __setup_vector_irq() local
280 cfg = irq_cfg(irq); in __setup_vector_irq()
281 if (!cfg) in __setup_vector_irq()
284 if (!cpumask_test_cpu(cpu, cfg->domain)) in __setup_vector_irq()
286 vector = cfg->vector; in __setup_vector_irq()
295 cfg = irq_cfg(irq); in __setup_vector_irq()
296 if (!cpumask_test_cpu(cpu, cfg->domain)) in __setup_vector_irq()
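__setup_vector_irq() (source lines 270-296) populates the per-cpu vector_irq table when a CPU comes online: a first pass marks the vector of every irq whose domain includes this CPU, a second pass invalidates stale entries whose domain no longer does. Roughly, with the locking and loop scaffolding inferred:

static void __setup_vector_irq(int cpu)
{
        struct irq_cfg *cfg;
        int irq, vector;

        raw_spin_lock(&vector_lock);
        /* Mark the in-use vectors for this CPU. */
        for_each_active_irq(irq) {
                cfg = irq_cfg(irq);
                if (!cfg)
                        continue;
                if (!cpumask_test_cpu(cpu, cfg->domain))
                        continue;
                vector = cfg->vector;
                per_cpu(vector_irq, cpu)[vector] = irq;
        }
        /* Invalidate entries whose irq no longer targets this CPU. */
        for (vector = 0; vector < NR_VECTORS; ++vector) {
                irq = per_cpu(vector_irq, cpu)[vector];
                if (irq <= VECTOR_UNDEFINED)
                        continue;
                cfg = irq_cfg(irq);
                if (!cpumask_test_cpu(cpu, cfg->domain))
                        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
        }
        raw_spin_unlock(&vector_lock);
}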
324 struct irq_cfg *cfg = irqd_cfg(data); in apic_retrigger_irq() local
329 cpu = cpumask_first_and(cfg->domain, cpu_online_mask); in apic_retrigger_irq()
330 apic->send_IPI_mask(cpumask_of(cpu), cfg->vector); in apic_retrigger_irq()
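apic_retrigger_irq() (source lines 324-330) resends an interrupt in software by IPI-ing the irq's own vector to the first online CPU of its domain; the locking and return value here are inferred:

static int apic_retrigger_irq(struct irq_data *data)
{
        struct irq_cfg *cfg = irqd_cfg(data);
        unsigned long flags;
        int cpu;

        raw_spin_lock_irqsave(&vector_lock, flags);
        cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
        apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return 1;
}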
351 struct irq_cfg *cfg = irqd_cfg(data); in apic_set_affinity() local
361 err = assign_irq_vector(irq, cfg, mask); in apic_set_affinity()
365 err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id); in apic_set_affinity()
367 if (assign_irq_vector(irq, cfg, data->affinity)) in apic_set_affinity()
378 void send_cleanup_vector(struct irq_cfg *cfg) in send_cleanup_vector() argument
385 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) in send_cleanup_vector()
389 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); in send_cleanup_vector()
393 cfg->move_in_progress = 0; in send_cleanup_vector()
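send_cleanup_vector() (source lines 378-393) kicks the CPUs of the old domain with IRQ_MOVE_CLEANUP_VECTOR so they release their stale vector_irq entries; the per-CPU loop at line 385 is the fallback for when a temporary cpumask cannot be allocated. A sketch:

void send_cleanup_vector(struct irq_cfg *cfg)
{
        cpumask_var_t cleanup_mask;

        if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
                unsigned int i;

                /* No memory for a scratch mask: IPI each CPU one by one. */
                for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
                        apic->send_IPI_mask(cpumask_of(i),
                                            IRQ_MOVE_CLEANUP_VECTOR);
        } else {
                cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
                apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
                free_cpumask_var(cleanup_mask);
        }
        cfg->move_in_progress = 0;
}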
409 struct irq_cfg *cfg; in smp_irq_move_cleanup_interrupt() local
420 cfg = irq_cfg(irq); in smp_irq_move_cleanup_interrupt()
421 if (!cfg) in smp_irq_move_cleanup_interrupt()
430 if (cfg->move_in_progress) in smp_irq_move_cleanup_interrupt()
433 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) in smp_irq_move_cleanup_interrupt()
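smp_irq_move_cleanup_interrupt() (source lines 409-433) is the handler for that IPI. For every vector still pointing at an irq on this CPU it checks whether the irq has genuinely moved away, skipping the slot if the move is still in progress, if this CPU is still a legitimate target, or if the vector is still pending in the local APIC's IRR (in which case it re-IPIs itself and retries later). A condensed sketch; everything around the three visible fragments is reconstructed from the usual implementation:

asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
{
        unsigned vector, me;

        ack_APIC_irq();
        irq_enter();

        me = smp_processor_id();
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                int irq = __this_cpu_read(vector_irq[vector]);
                struct irq_desc *desc;
                struct irq_cfg *cfg;
                unsigned int irr;

                if (irq <= VECTOR_UNDEFINED)
                        continue;
                desc = irq_to_desc(irq);
                if (!desc)
                        continue;
                cfg = irq_cfg(irq);
                if (!cfg)
                        continue;

                raw_spin_lock(&desc->lock);

                /* Cleanup request not yet issued for this irq. */
                if (cfg->move_in_progress)
                        goto unlock;
                /* This CPU is still a valid target: nothing is stale. */
                if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
                        goto unlock;
                /* Vector still pending in the IRR: clean up next time. */
                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                if (irr & (1U << (vector % 32))) {
                        apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
                        goto unlock;
                }
                __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
unlock:
                raw_spin_unlock(&desc->lock);
        }

        irq_exit();
}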
456 static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) in __irq_complete_move() argument
460 if (likely(!cfg->move_in_progress)) in __irq_complete_move()
465 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) in __irq_complete_move()
466 send_cleanup_vector(cfg); in __irq_complete_move()
469 void irq_complete_move(struct irq_cfg *cfg) in irq_complete_move() argument
471 __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); in irq_complete_move()
476 struct irq_cfg *cfg = irq_cfg(irq); in irq_force_complete_move() local
478 if (!cfg) in irq_force_complete_move()
481 __irq_complete_move(cfg, cfg->vector); in irq_force_complete_move()
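The *_complete_move() helpers (source lines 456-481) decide when to trigger that cleanup. On x86 the vector being serviced is pushed on the stack as its bitwise complement in orig_ax, so ~get_irq_regs()->orig_ax recovers it: once the interrupt arrives on the new vector on a CPU of the new domain, the migration has taken effect and the old domain can be released. irq_force_complete_move() forces the same path by passing cfg->vector directly. A sketch:

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
        unsigned me;

        if (likely(!cfg->move_in_progress))
                return;

        me = smp_processor_id();
        /* Arrived on the new vector, on a CPU of the new domain. */
        if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
                send_cleanup_vector(cfg);
}

void irq_complete_move(struct irq_cfg *cfg)
{
        /* orig_ax holds the complement of the vector being serviced. */
        __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

void irq_force_complete_move(int irq)
{
        struct irq_cfg *cfg = irq_cfg(irq);

        if (!cfg)
                return;
        /* Pretend the irq just fired on its current vector. */
        __irq_complete_move(cfg, cfg->vector);
}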
490 struct irq_cfg *cfg; in arch_setup_hwirq() local
494 cfg = alloc_irq_cfg(irq, node); in arch_setup_hwirq()
495 if (!cfg) in arch_setup_hwirq()
499 ret = __assign_irq_vector(irq, cfg, apic->target_cpus()); in arch_setup_hwirq()
503 irq_set_chip_data(irq, cfg); in arch_setup_hwirq()
505 free_irq_cfg(irq, cfg); in arch_setup_hwirq()
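arch_setup_hwirq() (source lines 490-505) strings the pieces together for a new hardware irq: allocate the config, assign a vector targeting the APIC's default CPU set, and install the config as chip data on success or free it on failure. The locking around the unlocked helper is inferred:

int arch_setup_hwirq(unsigned int irq, int node)
{
        struct irq_cfg *cfg;
        unsigned long flags;
        int ret;

        cfg = alloc_irq_cfg(irq, node);
        if (!cfg)
                return -ENOMEM;

        raw_spin_lock_irqsave(&vector_lock, flags);
        ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
        raw_spin_unlock_irqrestore(&vector_lock, flags);

        if (!ret)
                irq_set_chip_data(irq, cfg);
        else
                free_irq_cfg(irq, cfg);
        return ret;
}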
511 struct irq_cfg *cfg = irq_cfg(irq); in arch_teardown_hwirq() local
514 clear_irq_vector(irq, cfg); in arch_teardown_hwirq()
515 free_irq_cfg(irq, cfg); in arch_teardown_hwirq()
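Teardown (source lines 511-515) is the mirror image: release the vector, then free the config. A minimal sketch of the two visible steps (the full function may do additional cleanup between lines 511 and 514 that the matches do not show):

void arch_teardown_hwirq(unsigned int irq)
{
        struct irq_cfg *cfg = irq_cfg(irq);

        clear_irq_vector(irq, cfg);
        free_irq_cfg(irq, cfg);
}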