This source file includes the following definitions; a standalone sketch of the flush-policy decomposition follows the list.
- blk_flush_policy
- blk_flush_cur_seq
- blk_flush_restore_request
- blk_flush_queue_rq
- blk_flush_complete_seq
- flush_end_io
- blk_kick_flush
- mq_flush_data_end_io
- blk_insert_flush
- blkdev_issue_flush
- blk_alloc_flush_queue
- blk_free_flush_queue
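For orientation before the source proper, here is a hedged, standalone userspace C sketch of the decision blk_flush_policy() makes below. The QFLAG_*, RQ_* and FSEQ_* names and the flush_policy() helper are simplified stand-ins invented for this sketch, not kernel definitions; only the branching mirrors the real function.

#include <stdio.h>

/* Simplified stand-ins for the kernel flags used by blk_flush_policy(). */
#define QFLAG_WC    (1u << 0)   /* queue has a volatile writeback cache */
#define QFLAG_FUA   (1u << 1)   /* queue supports FUA natively          */
#define RQ_PREFLUSH (1u << 0)   /* request asks for a pre-flush         */
#define RQ_FUA      (1u << 1)   /* request asks for FUA                 */

enum {  /* mirrors the REQ_FSEQ_* flags defined in the file below */
	FSEQ_PREFLUSH  = 1 << 0,
	FSEQ_DATA      = 1 << 1,
	FSEQ_POSTFLUSH = 1 << 2,
};

static unsigned int flush_policy(unsigned int qflags, unsigned int rqflags,
				 unsigned int nr_sectors)
{
	unsigned int policy = 0;

	if (nr_sectors)
		policy |= FSEQ_DATA;
	if (qflags & QFLAG_WC) {
		if (rqflags & RQ_PREFLUSH)
			policy |= FSEQ_PREFLUSH;
		/* no native FUA: emulate it with a flush after the data */
		if (!(qflags & QFLAG_FUA) && (rqflags & RQ_FUA))
			policy |= FSEQ_POSTFLUSH;
	}
	return policy;
}

int main(void)
{
	unsigned int p = flush_policy(QFLAG_WC, RQ_PREFLUSH | RQ_FUA, 8);

	/* Writeback cache without FUA: the write is decomposed into
	 * PREFLUSH + DATA + POSTFLUSH (prints 1 1 1). */
	printf("%d %d %d\n", !!(p & FSEQ_PREFLUSH), !!(p & FSEQ_DATA),
	       !!(p & FSEQ_POSTFLUSH));
	return 0;
}

With QFLAG_WC cleared (no volatile cache), the same call returns only FSEQ_DATA, and an empty flush returns 0, which is why blk_insert_flush() below can complete such a request without doing any work.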

/*
 * Functions to sequence PREFLUSH and FUA writes (REQ_PREFLUSH and REQ_FUA).
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>
#include <linux/lockdep.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If a flush has been pending longer than this timeout, it is issued
	 * even if flush data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, unsigned int flags);

/* which REQ_FSEQ_* steps does @rq need, given the queue's cache/FUA flags? */
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

/* the next incomplete REQ_FSEQ_* step: lowest zero bit of rq->flush.seq */
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
	blk_mq_add_to_requeue_list(rq, add_front, true);
}

/*
 * blk_flush_complete_seq - record that @rq finished the @seq step of its
 * flush sequence and advance it to the next step (or complete it).
 *
 * Called with fq->mq_flush_lock held.
 */
static void blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	unsigned int cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq has finished (or aborted) its flush sequence;
		 * restore it to a normal request and complete it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		blk_mq_end_request(rq, error);
		break;

	default:
		BUG();
	}

	blk_kick_flush(q, fq, cmd_flags);
}

static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
	struct blk_mq_hw_ctx *hctx;

	/* release the tag's ownership to the req cloned from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);

	if (!refcount_dec_and_test(&flush_rq->ref)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return;
	}

	if (fq->rq_status != BLK_STS_OK)
		error = fq->rq_status;

	hctx = flush_rq->mq_hctx;
	if (!q->elevator) {
		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
		flush_rq->tag = -1;
	} else {
		blk_mq_put_driver_tag(flush_rq);
		flush_rq->internal_tag = -1;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

	fq->flush_queue_delayed = 0;
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/*
 * blk_kick_flush - issue a flush request for the PREFLUSH/POSTFLUSH requests
 * pending on @fq, if the flush machinery is idle.
 *
 * Called with fq->mq_flush_lock held.
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   unsigned int flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* a flush is already in flight, or there is nothing to flush */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return;

	/*
	 * Defer the flush while flush data requests are still in flight,
	 * but only when an I/O scheduler is in use and the pending flush
	 * has not yet waited FLUSH_PENDING_TIMEOUT.
	 */
	if (!list_empty(&fq->flush_data_in_flight) && q->elevator &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means a flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * Without an I/O scheduler, borrow the driver tag from the first
	 * pending request (the two can never be in flight at the same time)
	 * and take over that tag slot.  With a scheduler, borrow only the
	 * scheduler tag so driver-tag get/put still works on flush_rq.
	 */
	flush_rq->mq_ctx = first_rq->mq_ctx;
	flush_rq->mq_hctx = first_rq->mq_hctx;

	if (!q->elevator) {
		fq->orig_rq = first_rq;
		flush_rq->tag = first_rq->tag;
		blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
	} else {
		flush_rq->internal_tag = first_rq->internal_tag;
	}

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	blk_flush_queue_rq(flush_rq, false);
}

static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag(rq);
	}

	/* the data write has finished; record it and advance the sequence */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
}

/*
 * blk_insert_flush - insert a new PREFLUSH/FUA request into the flush
 * machinery, or complete/dispatch it directly if no sequencing is needed.
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache it */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		blk_mq_end_request(rq, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but no pre- or post-flush is necessary, the request
	 * can bypass the flush machinery and be queued for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		blk_mq_request_bypass_insert(rq, false, false);
		return;
	}

	/*
	 * @rq should go through the flush machinery.  Mark it part of a
	 * flush sequence and submit it for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io;

	rq->end_io = mq_flush_data_end_io;

	spin_lock_irq(&fq->mq_flush_lock);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
	spin_unlock_irq(&fq->mq_flush_lock);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue the flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can supply
 *    room for storing the error offset in case of a flush error.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		       sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * Some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file), and so issuing a flush
	 * here will panic.  Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it.  For all other drivers, this will be zero.
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
					      int node, int cmd_size, gfp_t flags)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), flags, node);
	if (!fq)
		goto fail;

	spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	lockdep_register_key(&fq->key);
	lockdep_set_class(&fq->mq_flush_lock, &fq->key);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio based request queues don't have a flush queue */
	if (!fq)
		return;

	lockdep_unregister_key(&fq->key);
	kfree(fq->flush_rq);
	kfree(fq);
}