This source file includes the following definitions:
- mmc_cqe_dcmd_busy
- mmc_cqe_check_busy
- mmc_cqe_can_dcmd
- mmc_cqe_issue_type
- mmc_issue_type
- __mmc_cqe_recovery_notifier
- mmc_cqe_recovery_notifier
- mmc_cqe_timed_out
- mmc_mq_timed_out
- mmc_mq_recovery_handler
- mmc_alloc_sg
- mmc_queue_setup_discard
- mmc_get_max_segments
- __mmc_init_request
- mmc_exit_request
- mmc_mq_init_request
- mmc_mq_exit_request
- mmc_mq_queue_rq
- mmc_setup_queue
- mmc_merge_capable
- mmc_init_queue
- mmc_queue_suspend
- mmc_queue_resume
- mmc_cleanup_queue
- mmc_queue_map_sg

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"

#define MMC_DMA_MAP_MERGE_SEGMENTS	512

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;

	mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}

static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}

enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (mq->use_cqe)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

/* Called with mq->lock held */
static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}
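
/*
 * Illustrative sketch (not part of this file), assuming a CQE host driver
 * that hits an error it cannot recover from on its own: the block driver is
 * expected to point mrq->recovery_notifier at mmc_cqe_recovery_notifier(),
 * and the host driver invokes it from its error path, roughly:
 *
 *	static void my_cqe_error(struct mmc_host *host, struct mmc_request *mrq)
 *	{
 *		if (mrq->recovery_notifier)
 *			mrq->recovery_notifier(mrq);
 *	}
 *
 * The notifier schedules mq->recovery_work, which is serviced by
 * mmc_mq_recovery_handler() below. my_cqe_error() is a hypothetical name
 * used only for this sketch.
 */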

static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				mmc_cqe_recovery_notifier(mrq);
			return BLK_EH_RESET_TIMER;
		}
		/* The request has gone already */
		return BLK_EH_DONE;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}

static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
						 bool reserved)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;
	bool ignore_tout;

	spin_lock_irqsave(&mq->lock, flags);
	ignore_tout = mq->recovery_needed || !mq->use_cqe;
	spin_unlock_irqrestore(&mq->lock, flags);

	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}

static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (mq->use_cqe)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;

	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;

	if (mmc_can_secure_erase_trim(card))
		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}

static unsigned int mmc_get_max_segments(struct mmc_host *host)
{
	return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
					 host->max_segs;
}

/**
 * __mmc_init_request() - initialize the MMC-specific per-request data
 * @mq: the request queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
			      gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue *mq = set->driver_data;

	mmc_exit_request(mq->queue, req);
}

static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	int ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(&mq->lock);

	if (mq->recovery_needed || mq->busy) {
		spin_unlock_irq(&mq->lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	/* Parallel dispatch of requests is not supported at the moment */
	mq->busy = true;

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(&mq->lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (mq->use_cqe) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(&mq->lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		mq->busy = false;
		spin_unlock_irq(&mq->lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	} else {
		WRITE_ONCE(mq->busy, false);
	}

	return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};

static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned block_size = 512;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	if (host->can_dma_map_merge)
		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
							mmc_dev(host)),
		     "merging was advertised but not possible");
	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));

	if (mmc_card_mmc(card))
		block_size = card->ext_csd.data_sector_size;

	blk_queue_logical_block_size(mq->queue, block_size);
	/*
	 * When DMA map merging is used, blk_queue_can_use_dma_map_merging()
	 * has already set a virt boundary for the queue, in which case a
	 * maximum segment size must not be set as well.
	 */
	if (!host->can_dma_map_merge)
		blk_queue_max_segment_size(mq->queue,
			round_down(host->max_seg_size, block_size));

	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);
}

static inline bool mmc_merge_capable(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int ret;

	mq->card = card;
	mq->use_cqe = host->cqe_enabled;

	spin_lock_init(&mq->lock);

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = &mmc_mq_ops;
	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (mq->use_cqe)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	/*
	 * Since blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops,
	 * host->can_dma_map_merge must be set before that call so that
	 * mmc_get_max_segments() returns the right segment count.
	 */
	if (mmc_merge_capable(host) &&
	    host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
	    dma_get_merge_boundary(mmc_dev(host)))
		host->can_dma_map_merge = 1;
	else
		host->can_dma_map_merge = 0;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ret;

	mq->queue = blk_mq_init_queue(&mq->tag_set);
	if (IS_ERR(mq->queue)) {
		ret = PTR_ERR(mq->queue);
		goto free_tag_set;
	}

	if (mmc_host_is_spi(host) && host->use_spi_crc)
		mq->queue->backing_dev_info->capabilities |=
			BDI_CAP_STABLE_WRITES;

	mq->queue->queuedata = mq;
	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);
	return 0;

free_tag_set:
	blk_mq_free_tag_set(&mq->tag_set);
	return ret;
}
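
/*
 * Illustrative sketch (not part of this file): a consumer such as the MMC
 * block driver embeds a struct mmc_queue in its per-device data and brings
 * the queue up with mmc_init_queue().  Assuming a hypothetical driver
 * structure my_blk_data with an embedded queue, that looks roughly like:
 *
 *	struct my_blk_data {
 *		struct mmc_queue queue;
 *		// other per-device state
 *	};
 *
 *	static int my_blk_probe_queue(struct my_blk_data *md,
 *				      struct mmc_card *card)
 *	{
 *		int ret = mmc_init_queue(&md->queue, card);
 *
 *		if (ret)
 *			return ret;
 *		// md->queue.queue is now the request_queue to attach
 *		// to the gendisk.
 *		return 0;
 *	}
 *
 * my_blk_data and my_blk_probe_queue are hypothetical names for this sketch.
 */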

void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The queue may have been suspended, so make sure it is unquiesced
	 * before it is cleaned up.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	blk_cleanup_queue(q);
	blk_mq_free_tag_set(&mq->tag_set);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}
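
/*
 * Illustrative sketch (not part of this file) of the queue lifecycle as
 * seen by a consumer of this API: suspend/resume bracket periods where no
 * requests may be dispatched, and cleanup tears everything down.  Assuming
 * the same hypothetical my_blk_data as in the sketch above:
 *
 *	static void my_blk_suspend(struct my_blk_data *md)
 *	{
 *		mmc_queue_suspend(&md->queue);
 *	}
 *
 *	static void my_blk_resume(struct my_blk_data *md)
 *	{
 *		mmc_queue_resume(&md->queue);
 *	}
 *
 *	static void my_blk_remove(struct my_blk_data *md)
 *	{
 *		mmc_cleanup_queue(&md->queue);
 *	}
 *
 * After mmc_cleanup_queue() returns, mq->card is NULL and the request queue
 * has been released, so the struct mmc_queue must not be reused without
 * calling mmc_init_queue() again.
 */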

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
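
/*
 * Illustrative sketch (not part of this file): when preparing a data
 * request, the block driver points its mmc_data at the per-request
 * scatterlist allocated in __mmc_init_request() and lets
 * mmc_queue_map_sg() fill it from the block request, roughly:
 *
 *	static void my_prep_data(struct mmc_queue *mq,
 *				 struct mmc_queue_req *mqrq,
 *				 struct mmc_data *data)
 *	{
 *		data->sg = mqrq->sg;
 *		data->sg_len = mmc_queue_map_sg(mq, mqrq);
 *	}
 *
 * my_prep_data() is a hypothetical helper name used only for this sketch.
 */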