root/kernel/irq_work.c


DEFINITIONS

This source file includes the following definitions:
  1. irq_work_claim
  2. arch_irq_work_raise
  3. __irq_work_queue_local
  4. irq_work_queue
  5. irq_work_queue_on
  6. irq_work_needs_cpu
  7. irq_work_run_list
  8. irq_work_run
  9. irq_work_tick
  10. irq_work_sync

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

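/*
 * Illustrative usage sketch (not part of the original file): a typical
 * client initializes an irq_work with a callback and queues it from NMI
 * or other contexts where taking locks is unsafe; the callback later runs
 * in hard interrupt context with interrupts disabled. The names
 * example_work, example_func and example_setup_and_trigger are
 * hypothetical; init_irq_work() and irq_work_queue() come from
 * <linux/irq_work.h>.
 */
static struct irq_work example_work;

static void example_func(struct irq_work *work)
{
        /* Runs from the irq_work IPI or the timer tick, IRQs disabled. */
        pr_info("irq_work callback ran\n");
}

static void example_setup_and_trigger(void)
{
        init_irq_work(&example_work, example_func);
        /* NMI-safe: claiming only does a lockless cmpxchg on work->flags. */
        irq_work_queue(&example_work);
}
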
/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
        unsigned long flags, oflags, nflags;

        /*
         * Start with our best wish as a premise but only trust any
         * flag value after cmpxchg() result.
         */
        flags = work->flags & ~IRQ_WORK_PENDING;
        for (;;) {
                nflags = flags | IRQ_WORK_CLAIMED;
                oflags = cmpxchg(&work->flags, flags, nflags);
                if (oflags == flags)
                        break;
                if (oflags & IRQ_WORK_PENDING)
                        return false;
                flags = oflags;
                cpu_relax();
        }

        return true;
}

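/*
 * For reference, the flag bits driven by irq_work_claim() and
 * irq_work_run_list() are defined in <linux/irq_work.h> (see the header
 * for the authoritative values); roughly:
 *
 *      IRQ_WORK_PENDING        work is queued and will be run
 *      IRQ_WORK_BUSY           work is being (or about to be) executed
 *      IRQ_WORK_LAZY           no IPI wanted, run from the next timer tick
 *      IRQ_WORK_CLAIMED        IRQ_WORK_PENDING | IRQ_WORK_BUSY
 *
 * A free work has neither PENDING nor BUSY set; a successful claim sets
 * both; irq_work_run_list() clears PENDING just before the callback and
 * BUSY just after it, unless the work was claimed again in between.
 */
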
void __weak arch_irq_work_raise(void)
{
        /*
         * Lame architectures will get the timer tick callback
         */
}

/* Enqueue on the current CPU; work must already be claimed and preemption disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
        /* If the work is "lazy", handle it from the next tick, if any */
        if (work->flags & IRQ_WORK_LAZY) {
                if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
                    tick_nohz_tick_stopped())
                        arch_irq_work_raise();
        } else {
                if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
                        arch_irq_work_raise();
        }
}

  73 
  74 /* Enqueue the irq work @work on the current CPU */
  75 bool irq_work_queue(struct irq_work *work)
  76 {
  77         /* Only queue if not already pending */
  78         if (!irq_work_claim(work))
  79                 return false;
  80 
  81         /* Queue the entry and raise the IPI if needed. */
  82         preempt_disable();
  83         __irq_work_queue_local(work);
  84         preempt_enable();
  85 
  86         return true;
  87 }
  88 EXPORT_SYMBOL_GPL(irq_work_queue);
  89 
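/*
 * Illustrative sketch (hypothetical names): while a work is still
 * PENDING, further irq_work_queue() calls return false and are
 * coalesced, so a producer may call this on every event without
 * building up an unbounded backlog.
 */
static struct irq_work example_event_work;      /* assume init_irq_work() was called elsewhere */

static void example_report_event(void)
{
        /*
         * Returns false when the work was already pending; the queued
         * callback will cover this event as well, so nothing more to do.
         */
        irq_work_queue(&example_event_work);
}
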
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
        return irq_work_queue(work);

#else /* CONFIG_SMP: */
        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(cpu));

        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        preempt_disable();
        if (cpu != smp_processor_id()) {
                /* Arch remote IPI send/receive backends aren't NMI safe */
                WARN_ON_ONCE(in_nmi());
                if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
                        arch_send_call_function_single_ipi(cpu);
        } else {
                __irq_work_queue_local(work);
        }
        preempt_enable();

        return true;
#endif /* CONFIG_SMP */
}

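/*
 * Illustrative sketch (hypothetical names): run a callback on a specific
 * CPU, e.g. to poke per-CPU state that must only be touched locally.
 * Note the constraints enforced above: the target CPU must be online and
 * remote queueing must not be attempted from NMI context.
 */
static void example_kick_cpu(struct irq_work *work, int target_cpu)
{
        if (!irq_work_queue_on(work, target_cpu))
                pr_debug("irq_work already pending, not re-queued\n");
}
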
bool irq_work_needs_cpu(void)
{
        struct llist_head *raised, *lazy;

        raised = this_cpu_ptr(&raised_list);
        lazy = this_cpu_ptr(&lazy_list);

        if (llist_empty(raised) || arch_irq_work_has_interrupt())
                if (llist_empty(lazy))
                        return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}

static void irq_work_run_list(struct llist_head *list)
{
        struct irq_work *work, *tmp;
        struct llist_node *llnode;
        unsigned long flags;

        BUG_ON(!irqs_disabled());

        if (llist_empty(list))
                return;

        llnode = llist_del_all(list);
        llist_for_each_entry_safe(work, tmp, llnode, llnode) {
                /*
                 * Clear the PENDING bit; after this point the @work
                 * can be re-used.
                 * Make it immediately visible so that other CPUs trying
                 * to claim that work don't rely on us to handle their data
                 * while we are in the middle of the func.
                 */
                flags = work->flags & ~IRQ_WORK_PENDING;
                xchg(&work->flags, flags);

                work->func(work);
                /*
                 * Clear the BUSY bit and return to the free state if
                 * no-one else claimed it meanwhile.
                 */
                (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
        }
}

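/*
 * Illustrative sketch (hypothetical names): because PENDING is cleared
 * above before ->func() is invoked, a callback may re-queue its own work
 * from within the callback; the work is then run on a later IPI or tick
 * rather than looping inside the current pass.
 */
static bool example_more_events_pending(void);  /* hypothetical predicate */

static void example_selfarm_func(struct irq_work *work)
{
        if (example_more_events_pending())
                irq_work_queue(work);
}
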
/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
        irq_work_run_list(this_cpu_ptr(&raised_list));
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
        struct llist_head *raised = this_cpu_ptr(&raised_list);

        if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                irq_work_run_list(raised);
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work: ensure the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
        lockdep_assert_irqs_enabled();

        while (work->flags & IRQ_WORK_BUSY)
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
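
/*
 * Illustrative sketch (hypothetical names): before freeing an object that
 * embeds an irq_work, wait for a possibly in-flight callback to finish so
 * it never touches freed memory. Must be called with IRQs enabled, per
 * the lockdep assertion above.
 */
struct example_dev {
        struct irq_work wakeup_work;
        /* ... other fields ... */
};

static void example_teardown(struct example_dev *dev)
{
        irq_work_sync(&dev->wakeup_work);
        kfree(dev);     /* kfree() would additionally need <linux/slab.h> */
}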
