root/block/blk-softirq.c

DEFINITIONS

This source file includes the following definitions.
  1. blk_done_softirq
  2. trigger_softirq
  3. raise_blk_irq
  4. raise_blk_irq
  5. blk_softirq_cpu_dead
  6. __blk_complete_request
  7. blk_softirq_init

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to softirq rq completions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>

#include "blk.h"

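/*
 * Per-CPU list of requests whose completion is deferred to the
 * BLOCK_SOFTIRQ handler on that CPU.
 */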
static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

/*
 * Softirq action handler - move entries to a local list and loop over them
 * while passing each to the queue's registered completion handler.
 */
static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
        struct list_head *cpu_list, local_list;

        local_irq_disable();
        cpu_list = this_cpu_ptr(&blk_cpu_done);
        list_replace_init(cpu_list, &local_list);
        local_irq_enable();

        while (!list_empty(&local_list)) {
                struct request *rq;

                rq = list_entry(local_list.next, struct request, ipi_list);
                list_del_init(&rq->ipi_list);
                rq->q->mq_ops->complete(rq);
        }
}

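/*
 * Illustrative sketch, not part of this file: a minimal example of the
 * kind of blk-mq ->complete callback that blk_done_softirq() ends up
 * invoking via rq->q->mq_ops->complete(rq). The function name is
 * hypothetical, and a real driver would derive the status from its own
 * completion state rather than hard-coding BLK_STS_OK:
 *
 *        static void example_driver_complete_rq(struct request *rq)
 *        {
 *                blk_mq_end_request(rq, BLK_STS_OK);
 *        }
 */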
#ifdef CONFIG_SMP
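/*
 * IPI callback: runs on the CPU chosen for completion. Queue the request
 * on that CPU's blk_cpu_done list and raise BLOCK_SOFTIRQ if the list was
 * previously empty.
 */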
static void trigger_softirq(void *data)
{
        struct request *rq = data;
        unsigned long flags;
        struct list_head *list;

        local_irq_save(flags);
        list = this_cpu_ptr(&blk_cpu_done);
        list_add_tail(&rq->ipi_list, list);

        if (list->next == &rq->ipi_list)
                raise_softirq_irqoff(BLOCK_SOFTIRQ);

        local_irq_restore(flags);
}

/*
 * Set up and invoke a run of 'trigger_softirq' on the given cpu. Returns 0
 * on success, 1 if the CPU is offline and the caller must complete the
 * request locally instead.
 */
static int raise_blk_irq(int cpu, struct request *rq)
{
        if (cpu_online(cpu)) {
                call_single_data_t *data = &rq->csd;

                data->func = trigger_softirq;
                data->info = rq;
                data->flags = 0;

                smp_call_function_single_async(cpu, data);
                return 0;
        }

        return 1;
}
#else /* CONFIG_SMP */
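/* On UP there is no remote CPU to signal; always complete locally. */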
static int raise_blk_irq(int cpu, struct request *rq)
{
        return 1;
}
#endif

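/*
 * CPU hotplug teardown callback, registered for CPUHP_BLOCK_SOFTIRQ_DEAD
 * in blk_softirq_init() below.
 */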
static int blk_softirq_cpu_dead(unsigned int cpu)
{
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
        local_irq_disable();
        list_splice_init(&per_cpu(blk_cpu_done, cpu),
                         this_cpu_ptr(&blk_cpu_done));
        raise_softirq_irqoff(BLOCK_SOFTIRQ);
        local_irq_enable();

        return 0;
}

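/*
 * Complete a request: pick a completion CPU based on QUEUE_FLAG_SAME_COMP /
 * QUEUE_FLAG_SAME_FORCE and cache sharing, then queue the request on that
 * CPU's blk_cpu_done list, either directly or via an IPI (raise_blk_irq()).
 */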
void __blk_complete_request(struct request *req)
{
        struct request_queue *q = req->q;
        int cpu, ccpu = req->mq_ctx->cpu;
        unsigned long flags;
        bool shared = false;

        BUG_ON(!q->mq_ops->complete);

        local_irq_save(flags);
        cpu = smp_processor_id();

        /*
         * Select completion CPU
         */
        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && ccpu != -1) {
                if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
                        shared = cpus_share_cache(cpu, ccpu);
        } else
                ccpu = cpu;

        /*
         * If the current CPU and the requested CPU share a cache, run the
         * softirq on the current CPU. One might worry that this behaves
         * just like QUEUE_FLAG_SAME_FORCE, but it does not:
         * blk_complete_request() runs in the interrupt handler, and since
         * the I/O controller does not support multiple interrupts, the
         * current CPU is effectively unique. This avoids sending an IPI
         * from the current CPU to the first CPU of the group.
         */
        if (ccpu == cpu || shared) {
                struct list_head *list;
do_local:
                list = this_cpu_ptr(&blk_cpu_done);
                list_add_tail(&req->ipi_list, list);

                /*
                 * If the list only contains our just-added request, raise
                 * the softirq. If there are already entries on the list,
                 * someone has already raised it and it simply hasn't run
                 * yet.
                 */
                if (list->next == &req->ipi_list)
                        raise_softirq_irqoff(BLOCK_SOFTIRQ);
        } else if (raise_blk_irq(ccpu, req))
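                /* raise_blk_irq() failed: target CPU offline (or !SMP), complete here. */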
                goto do_local;

        local_irq_restore(flags);
}

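/*
 * Initialize the per-CPU completion lists, register the BLOCK_SOFTIRQ
 * handler and the CPU hotplug dead callback.
 */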
static __init int blk_softirq_init(void)
{
        int i;

        for_each_possible_cpu(i)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
        cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
                                  "block/softirq:dead", NULL,
                                  blk_softirq_cpu_dead);
        return 0;
}
subsys_initcall(blk_softirq_init);
