1 /*
2  * Block driver for media (i.e., flash cards)
3  *
4  * Copyright 2002 Hewlett-Packard Company
5  * Copyright 2005-2008 Pierre Ossman
6  *
7  * Use consistent with the GNU GPL is permitted,
8  * provided that this copyright notice is
9  * preserved in its entirety in all copies and derived works.
10  *
11  * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12  * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13  * FITNESS FOR ANY PARTICULAR PURPOSE.
14  *
15  * Many thanks to Alessandro Rubini and Jonathan Corbet!
16  *
17  * Author:  Andrew Christian
18  *          28 May 2002
19  */
20 #include <linux/moduleparam.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23 
24 #include <linux/kernel.h>
25 #include <linux/fs.h>
26 #include <linux/slab.h>
27 #include <linux/errno.h>
28 #include <linux/hdreg.h>
29 #include <linux/kdev_t.h>
30 #include <linux/blkdev.h>
31 #include <linux/mutex.h>
32 #include <linux/scatterlist.h>
33 #include <linux/string_helpers.h>
34 #include <linux/delay.h>
35 #include <linux/capability.h>
36 #include <linux/compat.h>
37 #include <linux/pm_runtime.h>
38 
39 #include <linux/mmc/ioctl.h>
40 #include <linux/mmc/card.h>
41 #include <linux/mmc/host.h>
42 #include <linux/mmc/mmc.h>
43 #include <linux/mmc/sd.h>
44 
45 #include <asm/uaccess.h>
46 
47 #include "queue.h"
48 
49 MODULE_ALIAS("mmc:block");
50 #ifdef MODULE_PARAM_PREFIX
51 #undef MODULE_PARAM_PREFIX
52 #endif
53 #define MODULE_PARAM_PREFIX "mmcblk."
54 
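/*
 * Vendor-specific pre-conditioning for devices flagged with
 * MMC_QUIRK_INAND_CMD38 (iNAND devices): the intended CMD38 erase class is
 * first written to EXT_CSD byte 113 before the erase itself is issued.
 */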
55 #define INAND_CMD38_ARG_EXT_CSD  113
56 #define INAND_CMD38_ARG_ERASE    0x00
57 #define INAND_CMD38_ARG_TRIM     0x01
58 #define INAND_CMD38_ARG_SECERASE 0x80
59 #define INAND_CMD38_ARG_SECTRIM1 0x81
60 #define INAND_CMD38_ARG_SECTRIM2 0x88
61 #define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
62 #define MMC_SANITIZE_REQ_TIMEOUT 240000
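/*
 * Helper used below to pull the EXT_CSD byte index (bits 16-23 of a
 * SWITCH/CMD6 argument) out of a user-supplied command argument, e.g. to
 * recognise a SANITIZE request coming in through the ioctl path.
 */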
63 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
64 
65 #define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
66 				  (req->cmd_flags & REQ_META)) && \
67 				  (rq_data_dir(req) == WRITE))
68 #define PACKED_CMD_VER	0x01
69 #define PACKED_CMD_WR	0x02
70 
71 static DEFINE_MUTEX(block_mutex);
72 
73 /*
74  * The defaults come from config options but can be overridden by module
75  * or bootarg options.
76  */
77 static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
78 
79 /*
80  * We've only got one major, so the number of mmcblk devices is
81  * limited to (1 << 20) / number of minors per device.  It is also
82  * currently limited by the size of the static bitmaps below.
83  */
84 static int max_devices;
85 
86 #define MAX_DEVICES 256
87 
88 /* TODO: Replace these with struct ida */
89 static DECLARE_BITMAP(dev_use, MAX_DEVICES);
90 static DECLARE_BITMAP(name_use, MAX_DEVICES);
91 
92 /*
93  * There is one mmc_blk_data per slot.
94  */
95 struct mmc_blk_data {
96 	spinlock_t	lock;
97 	struct gendisk	*disk;
98 	struct mmc_queue queue;
99 	struct list_head part;
100 
101 	unsigned int	flags;
102 #define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
103 #define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
104 #define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */
105 
106 	unsigned int	usage;
107 	unsigned int	read_only;
108 	unsigned int	part_type;
109 	unsigned int	name_idx;
110 	unsigned int	reset_done;
111 #define MMC_BLK_READ		BIT(0)
112 #define MMC_BLK_WRITE		BIT(1)
113 #define MMC_BLK_DISCARD		BIT(2)
114 #define MMC_BLK_SECDISCARD	BIT(3)
115 
116 	/*
117 	 * Only set in the main mmc_blk_data associated
118 	 * with the mmc_card via dev_set_drvdata, and keeps
119 	 * track of the currently selected device partition.
120 	 */
121 	unsigned int	part_curr;
122 	struct device_attribute force_ro;
123 	struct device_attribute power_ro_lock;
124 	int	area_type;
125 };
126 
127 static DEFINE_MUTEX(open_lock);
128 
129 enum {
130 	MMC_PACKED_NR_IDX = -1,
131 	MMC_PACKED_NR_ZERO,
132 	MMC_PACKED_NR_SINGLE,
133 };
134 
135 module_param(perdev_minors, int, 0444);
136 MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
137 
138 static inline int mmc_blk_part_switch(struct mmc_card *card,
139 				      struct mmc_blk_data *md);
140 static int get_card_status(struct mmc_card *card, u32 *status, int retries);
141 
142 static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
143 {
144 	struct mmc_packed *packed = mqrq->packed;
145 
146 	BUG_ON(!packed);
147 
148 	mqrq->cmd_type = MMC_PACKED_NONE;
149 	packed->nr_entries = MMC_PACKED_NR_ZERO;
150 	packed->idx_failure = MMC_PACKED_NR_IDX;
151 	packed->retries = 0;
152 	packed->blocks = 0;
153 }
154 
155 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
156 {
157 	struct mmc_blk_data *md;
158 
159 	mutex_lock(&open_lock);
160 	md = disk->private_data;
161 	if (md && md->usage == 0)
162 		md = NULL;
163 	if (md)
164 		md->usage++;
165 	mutex_unlock(&open_lock);
166 
167 	return md;
168 }
169 
170 static inline int mmc_get_devidx(struct gendisk *disk)
171 {
172 	int devmaj = MAJOR(disk_devt(disk));
173 	int devidx = MINOR(disk_devt(disk)) / perdev_minors;
174 
175 	if (!devmaj)
176 		devidx = disk->first_minor / perdev_minors;
177 	return devidx;
178 }
179 
180 static void mmc_blk_put(struct mmc_blk_data *md)
181 {
182 	mutex_lock(&open_lock);
183 	md->usage--;
184 	if (md->usage == 0) {
185 		int devidx = mmc_get_devidx(md->disk);
186 		blk_cleanup_queue(md->queue.queue);
187 
188 		__clear_bit(devidx, dev_use);
189 
190 		put_disk(md->disk);
191 		kfree(md);
192 	}
193 	mutex_unlock(&open_lock);
194 }
195 
196 static ssize_t power_ro_lock_show(struct device *dev,
197 		struct device_attribute *attr, char *buf)
198 {
199 	int ret;
200 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
201 	struct mmc_card *card = md->queue.card;
202 	int locked = 0;
203 
204 	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
205 		locked = 2;
206 	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
207 		locked = 1;
208 
209 	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
210 
211 	mmc_blk_put(md);
212 
213 	return ret;
214 }
215 
216 static ssize_t power_ro_lock_store(struct device *dev,
217 		struct device_attribute *attr, const char *buf, size_t count)
218 {
219 	int ret;
220 	struct mmc_blk_data *md, *part_md;
221 	struct mmc_card *card;
222 	unsigned long set;
223 
224 	if (kstrtoul(buf, 0, &set))
225 		return -EINVAL;
226 
227 	if (set != 1)
228 		return count;
229 
230 	md = mmc_blk_get(dev_to_disk(dev));
231 	card = md->queue.card;
232 
233 	mmc_get_card(card);
234 
235 	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
236 				card->ext_csd.boot_ro_lock |
237 				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
238 				card->ext_csd.part_time);
239 	if (ret)
240 		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
241 	else
242 		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
243 
244 	mmc_put_card(card);
245 
246 	if (!ret) {
247 		pr_info("%s: Locking boot partition ro until next power on\n",
248 			md->disk->disk_name);
249 		set_disk_ro(md->disk, 1);
250 
251 		list_for_each_entry(part_md, &md->part, part)
252 			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
253 				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
254 				set_disk_ro(part_md->disk, 1);
255 			}
256 	}
257 
258 	mmc_blk_put(md);
259 	return count;
260 }
261 
262 static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
263 			     char *buf)
264 {
265 	int ret;
266 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
267 
268 	ret = snprintf(buf, PAGE_SIZE, "%d\n",
269 		       get_disk_ro(dev_to_disk(dev)) ^
270 		       md->read_only);
271 	mmc_blk_put(md);
272 	return ret;
273 }
274 
275 static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
276 			      const char *buf, size_t count)
277 {
278 	int ret;
279 	char *end;
280 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
281 	unsigned long set = simple_strtoul(buf, &end, 0);
282 	if (end == buf) {
283 		ret = -EINVAL;
284 		goto out;
285 	}
286 
287 	set_disk_ro(dev_to_disk(dev), set || md->read_only);
288 	ret = count;
289 out:
290 	mmc_blk_put(md);
291 	return ret;
292 }
293 
294 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
295 {
296 	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
297 	int ret = -ENXIO;
298 
299 	mutex_lock(&block_mutex);
300 	if (md) {
301 		if (md->usage == 2)
302 			check_disk_change(bdev);
303 		ret = 0;
304 
305 		if ((mode & FMODE_WRITE) && md->read_only) {
306 			mmc_blk_put(md);
307 			ret = -EROFS;
308 		}
309 	}
310 	mutex_unlock(&block_mutex);
311 
312 	return ret;
313 }
314 
315 static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
316 {
317 	struct mmc_blk_data *md = disk->private_data;
318 
319 	mutex_lock(&block_mutex);
320 	mmc_blk_put(md);
321 	mutex_unlock(&block_mutex);
322 }
323 
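/*
 * Report a synthetic CHS geometry for legacy tools: a fixed 4 heads and
 * 16 sectors per track, with the cylinder count derived from the disk
 * capacity (i.e. capacity / 64).
 */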
324 static int
325 mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
326 {
327 	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
328 	geo->heads = 4;
329 	geo->sectors = 16;
330 	return 0;
331 }
332 
333 struct mmc_blk_ioc_data {
334 	struct mmc_ioc_cmd ic;
335 	unsigned char *buf;
336 	u64 buf_bytes;
337 };
338 
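/*
 * Copy an MMC_IOC_CMD request from userspace in two steps: first the fixed
 * struct mmc_ioc_cmd, then (if blksz * blocks is non-zero) the data buffer
 * it points at, bounded by MMC_IOC_MAX_BYTES.  Returns an ERR_PTR() on
 * failure.
 */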
339 static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
340 	struct mmc_ioc_cmd __user *user)
341 {
342 	struct mmc_blk_ioc_data *idata;
343 	int err;
344 
345 	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
346 	if (!idata) {
347 		err = -ENOMEM;
348 		goto out;
349 	}
350 
351 	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
352 		err = -EFAULT;
353 		goto idata_err;
354 	}
355 
356 	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
357 	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
358 		err = -EOVERFLOW;
359 		goto idata_err;
360 	}
361 
362 	if (!idata->buf_bytes)
363 		return idata;
364 
365 	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
366 	if (!idata->buf) {
367 		err = -ENOMEM;
368 		goto idata_err;
369 	}
370 
371 	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
372 					idata->ic.data_ptr, idata->buf_bytes)) {
373 		err = -EFAULT;
374 		goto copy_err;
375 	}
376 
377 	return idata;
378 
379 copy_err:
380 	kfree(idata->buf);
381 idata_err:
382 	kfree(idata);
383 out:
384 	return ERR_PTR(err);
385 }
386 
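/*
 * Poll CMD13 (SEND_STATUS) until the card reports no error bits and has
 * left the programming state, sleeping 1-5 ms between attempts.  Used after
 * RPMB accesses, which complete asynchronously; gives up after retries_max
 * polls.
 */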
387 static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
388 				       u32 retries_max)
389 {
390 	int err;
391 	u32 retry_count = 0;
392 
393 	if (!status || !retries_max)
394 		return -EINVAL;
395 
396 	do {
397 		err = get_card_status(card, status, 5);
398 		if (err)
399 			break;
400 
401 		if (!R1_STATUS(*status) &&
402 				(R1_CURRENT_STATE(*status) != R1_STATE_PRG))
403 			break; /* RPMB programming operation complete */
404 
405 		/*
406 		 * Reschedule to give the MMC device a chance to continue
407 		 * processing the previous command without being polled too
408 		 * frequently.
409 		 */
410 		usleep_range(1000, 5000);
411 	} while (++retry_count < retries_max);
412 
413 	if (retry_count == retries_max)
414 		err = -EPERM;
415 
416 	return err;
417 }
418 
419 static int ioctl_do_sanitize(struct mmc_card *card)
420 {
421 	int err;
422 
423 	if (!mmc_can_sanitize(card)) {
424 			pr_warn("%s: %s - SANITIZE is not supported\n",
425 				mmc_hostname(card->host), __func__);
426 			err = -EOPNOTSUPP;
427 			goto out;
428 	}
429 
430 	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
431 		mmc_hostname(card->host), __func__);
432 
433 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
434 					EXT_CSD_SANITIZE_START, 1,
435 					MMC_SANITIZE_REQ_TIMEOUT);
436 
437 	if (err)
438 		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
439 		       mmc_hostname(card->host), __func__, err);
440 
441 	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
442 					     __func__);
443 out:
444 	return err;
445 }
446 
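/*
 * Handler for MMC_IOC_CMD.  A minimal, illustrative userspace sketch of
 * driving this path (assuming a device node such as /dev/mmcblk0 and an RCA
 * obtained elsewhere; the response/command-type flag values are not part of
 * the uapi and would have to be mirrored from the kernel's MMC_RSP_xxx and
 * MMC_CMD_xxx definitions):
 *
 *	struct mmc_ioc_cmd ic = { 0 };
 *	int fd = open("/dev/mmcblk0", O_RDWR);	// whole device, not a partition
 *
 *	ic.opcode = 13;				// MMC_SEND_STATUS (CMD13)
 *	ic.arg = rca << 16;
 *	ic.flags = ...;				// R1 response, AC command type
 *	if (ioctl(fd, MMC_IOC_CMD, &ic) == 0)
 *		printf("card status 0x%08x\n", ic.response[0]);
 *
 * The caller must have CAP_SYS_RAWIO and open the whole block device (both
 * are checked below).  Data transfers additionally set blksz, blocks and
 * write_flag, and point data_ptr at a user buffer, typically via the
 * mmc_ioc_cmd_set_data() helper from <linux/mmc/ioctl.h>.
 */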
447 static int mmc_blk_ioctl_cmd(struct block_device *bdev,
448 	struct mmc_ioc_cmd __user *ic_ptr)
449 {
450 	struct mmc_blk_ioc_data *idata;
451 	struct mmc_blk_data *md;
452 	struct mmc_card *card;
453 	struct mmc_command cmd = {0};
454 	struct mmc_data data = {0};
455 	struct mmc_request mrq = {NULL};
456 	struct scatterlist sg;
457 	int err;
458 	int is_rpmb = false;
459 	u32 status = 0;
460 
461 	/*
462 	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
463 	 * whole block device, not on a partition.  This prevents overspray
464 	 * between sibling partitions.
465 	 */
466 	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
467 		return -EPERM;
468 
469 	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
470 	if (IS_ERR(idata))
471 		return PTR_ERR(idata);
472 
473 	md = mmc_blk_get(bdev->bd_disk);
474 	if (!md) {
475 		err = -EINVAL;
476 		goto cmd_err;
477 	}
478 
479 	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
480 		is_rpmb = true;
481 
482 	card = md->queue.card;
483 	if (IS_ERR(card)) {
484 		err = PTR_ERR(card);
485 		goto cmd_done;
486 	}
487 
488 	cmd.opcode = idata->ic.opcode;
489 	cmd.arg = idata->ic.arg;
490 	cmd.flags = idata->ic.flags;
491 
492 	if (idata->buf_bytes) {
493 		data.sg = &sg;
494 		data.sg_len = 1;
495 		data.blksz = idata->ic.blksz;
496 		data.blocks = idata->ic.blocks;
497 
498 		sg_init_one(data.sg, idata->buf, idata->buf_bytes);
499 
500 		if (idata->ic.write_flag)
501 			data.flags = MMC_DATA_WRITE;
502 		else
503 			data.flags = MMC_DATA_READ;
504 
505 		/* data.flags must already be set before doing this. */
506 		mmc_set_data_timeout(&data, card);
507 
508 		/* Allow overriding the timeout_ns for empirical tuning. */
509 		if (idata->ic.data_timeout_ns)
510 			data.timeout_ns = idata->ic.data_timeout_ns;
511 
512 		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
513 			/*
514 			 * Pretend this is a data transfer and rely on the
515 			 * host driver to compute timeout.  When all host
516 			 * drivers support cmd.cmd_timeout for R1B, this
517 			 * can be changed to:
518 			 *
519 			 *     mrq.data = NULL;
520 			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
521 			 */
522 			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
523 		}
524 
525 		mrq.data = &data;
526 	}
527 
528 	mrq.cmd = &cmd;
529 
530 	mmc_get_card(card);
531 
532 	err = mmc_blk_part_switch(card, md);
533 	if (err)
534 		goto cmd_rel_host;
535 
536 	if (idata->ic.is_acmd) {
537 		err = mmc_app_cmd(card->host, card);
538 		if (err)
539 			goto cmd_rel_host;
540 	}
541 
542 	if (is_rpmb) {
543 		err = mmc_set_blockcount(card, data.blocks,
544 			idata->ic.write_flag & (1 << 31));
545 		if (err)
546 			goto cmd_rel_host;
547 	}
548 
549 	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
550 	    (cmd.opcode == MMC_SWITCH)) {
551 		err = ioctl_do_sanitize(card);
552 
553 		if (err)
554 			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
555 			       __func__, err);
556 
557 		goto cmd_rel_host;
558 	}
559 
560 	mmc_wait_for_req(card->host, &mrq);
561 
562 	if (cmd.error) {
563 		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
564 						__func__, cmd.error);
565 		err = cmd.error;
566 		goto cmd_rel_host;
567 	}
568 	if (data.error) {
569 		dev_err(mmc_dev(card->host), "%s: data error %d\n",
570 						__func__, data.error);
571 		err = data.error;
572 		goto cmd_rel_host;
573 	}
574 
575 	/*
576 	 * According to the SD specs, some commands require a delay after
577 	 * issuing the command.
578 	 */
579 	if (idata->ic.postsleep_min_us)
580 		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
581 
582 	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
583 		err = -EFAULT;
584 		goto cmd_rel_host;
585 	}
586 
587 	if (!idata->ic.write_flag) {
588 		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
589 						idata->buf, idata->buf_bytes)) {
590 			err = -EFAULT;
591 			goto cmd_rel_host;
592 		}
593 	}
594 
595 	if (is_rpmb) {
596 		/*
597 		 * Ensure RPMB command has completed by polling CMD13
598 		 * "Send Status".
599 		 */
600 		err = ioctl_rpmb_card_status_poll(card, &status, 5);
601 		if (err)
602 			dev_err(mmc_dev(card->host),
603 					"%s: Card Status=0x%08X, error %d\n",
604 					__func__, status, err);
605 	}
606 
607 cmd_rel_host:
608 	mmc_put_card(card);
609 
610 cmd_done:
611 	mmc_blk_put(md);
612 cmd_err:
613 	kfree(idata->buf);
614 	kfree(idata);
615 	return err;
616 }
617 
618 static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
619 	unsigned int cmd, unsigned long arg)
620 {
621 	int ret = -EINVAL;
622 	if (cmd == MMC_IOC_CMD)
623 		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
624 	return ret;
625 }
626 
627 #ifdef CONFIG_COMPAT
628 static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
629 	unsigned int cmd, unsigned long arg)
630 {
631 	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
632 }
633 #endif
634 
635 static const struct block_device_operations mmc_bdops = {
636 	.open			= mmc_blk_open,
637 	.release		= mmc_blk_release,
638 	.getgeo			= mmc_blk_getgeo,
639 	.owner			= THIS_MODULE,
640 	.ioctl			= mmc_blk_ioctl,
641 #ifdef CONFIG_COMPAT
642 	.compat_ioctl		= mmc_blk_compat_ioctl,
643 #endif
644 };
645 
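/*
 * Select the hardware partition (user area, boot, RPMB, GP) backing @md by
 * rewriting the partition access bits of PARTITION_CONFIG in the EXT_CSD.
 * Only the main mmc_blk_data tracks the currently selected partition in
 * part_curr; SD cards have no partitions to switch, so only the bookkeeping
 * is updated for them.
 */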
646 static inline int mmc_blk_part_switch(struct mmc_card *card,
647 				      struct mmc_blk_data *md)
648 {
649 	int ret;
650 	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
651 
652 	if (main_md->part_curr == md->part_type)
653 		return 0;
654 
655 	if (mmc_card_mmc(card)) {
656 		u8 part_config = card->ext_csd.part_config;
657 
658 		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
659 		part_config |= md->part_type;
660 
661 		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
662 				 EXT_CSD_PART_CONFIG, part_config,
663 				 card->ext_csd.part_time);
664 		if (ret)
665 			return ret;
666 
667 		card->ext_csd.part_config = part_config;
668 	}
669 
670 	main_md->part_curr = md->part_type;
671 	return 0;
672 }
673 
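/*
 * Ask an SD card how many blocks of an interrupted write actually reached
 * the medium, using APP_CMD followed by ACMD22 (SEND_NUM_WR_BLKS).  The
 * card returns a 4-byte big-endian count; (u32)-1 is returned here on any
 * failure.
 */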
674 static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
675 {
676 	int err;
677 	u32 result;
678 	__be32 *blocks;
679 
680 	struct mmc_request mrq = {NULL};
681 	struct mmc_command cmd = {0};
682 	struct mmc_data data = {0};
683 
684 	struct scatterlist sg;
685 
686 	cmd.opcode = MMC_APP_CMD;
687 	cmd.arg = card->rca << 16;
688 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
689 
690 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
691 	if (err)
692 		return (u32)-1;
693 	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
694 		return (u32)-1;
695 
696 	memset(&cmd, 0, sizeof(struct mmc_command));
697 
698 	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
699 	cmd.arg = 0;
700 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
701 
702 	data.blksz = 4;
703 	data.blocks = 1;
704 	data.flags = MMC_DATA_READ;
705 	data.sg = &sg;
706 	data.sg_len = 1;
707 	mmc_set_data_timeout(&data, card);
708 
709 	mrq.cmd = &cmd;
710 	mrq.data = &data;
711 
712 	blocks = kmalloc(4, GFP_KERNEL);
713 	if (!blocks)
714 		return (u32)-1;
715 
716 	sg_init_one(&sg, blocks, 4);
717 
718 	mmc_wait_for_req(card->host, &mrq);
719 
720 	result = ntohl(*blocks);
721 	kfree(blocks);
722 
723 	if (cmd.error || data.error)
724 		result = (u32)-1;
725 
726 	return result;
727 }
728 
729 static int get_card_status(struct mmc_card *card, u32 *status, int retries)
730 {
731 	struct mmc_command cmd = {0};
732 	int err;
733 
734 	cmd.opcode = MMC_SEND_STATUS;
735 	if (!mmc_host_is_spi(card->host))
736 		cmd.arg = card->rca << 16;
737 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
738 	err = mmc_wait_for_cmd(card->host, &cmd, retries);
739 	if (err == 0)
740 		*status = cmd.resp[0];
741 	return err;
742 }
743 
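/*
 * Poll the card with CMD13 until it signals READY_FOR_DATA and has left the
 * programming state, or until timeout_ms expires.  R1_ERROR in any status
 * response is reported through *gen_err.  If the host does hardware busy
 * detection (MMC_CAP_WAIT_WHILE_BUSY) and the caller allows it, a single
 * status read is enough.
 */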
744 static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
745 		bool hw_busy_detect, struct request *req, int *gen_err)
746 {
747 	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
748 	int err = 0;
749 	u32 status;
750 
751 	do {
752 		err = get_card_status(card, &status, 5);
753 		if (err) {
754 			pr_err("%s: error %d requesting status\n",
755 			       req->rq_disk->disk_name, err);
756 			return err;
757 		}
758 
759 		if (status & R1_ERROR) {
760 			pr_err("%s: %s: error sending status cmd, status %#x\n",
761 				req->rq_disk->disk_name, __func__, status);
762 			*gen_err = 1;
763 		}
764 
765 		/* We may rely on the host hw to handle busy detection.*/
766 		if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
767 			hw_busy_detect)
768 			break;
769 
770 		/*
771 		 * Timeout if the device never becomes ready for data and never
772 		 * leaves the program state.
773 		 */
774 		if (time_after(jiffies, timeout)) {
775 			pr_err("%s: Card stuck in programming state! %s %s\n",
776 				mmc_hostname(card->host),
777 				req->rq_disk->disk_name, __func__);
778 			return -ETIMEDOUT;
779 		}
780 
781 		/*
782 		 * Some cards mishandle the status bits,
783 		 * so make sure to check both the busy
784 		 * indication and the card state.
785 		 */
786 	} while (!(status & R1_READY_FOR_DATA) ||
787 		 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
788 
789 	return err;
790 }
791 
792 static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
793 		struct request *req, int *gen_err, u32 *stop_status)
794 {
795 	struct mmc_host *host = card->host;
796 	struct mmc_command cmd = {0};
797 	int err;
798 	bool use_r1b_resp = rq_data_dir(req) == WRITE;
799 
800 	/*
801 	 * Normally we use R1B responses for WRITE, but in cases where the host
802 	 * has specified a max_busy_timeout we need to validate it. A failure
803 	 * means we need to prevent the host from doing hw busy detection, which
804 	 * is done by converting to an R1 response instead.
805 	 */
806 	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
807 		use_r1b_resp = false;
808 
809 	cmd.opcode = MMC_STOP_TRANSMISSION;
810 	if (use_r1b_resp) {
811 		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
812 		cmd.busy_timeout = timeout_ms;
813 	} else {
814 		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
815 	}
816 
817 	err = mmc_wait_for_cmd(host, &cmd, 5);
818 	if (err)
819 		return err;
820 
821 	*stop_status = cmd.resp[0];
822 
823 	/* No need to check card status in case of READ. */
824 	if (rq_data_dir(req) == READ)
825 		return 0;
826 
827 	if (!mmc_host_is_spi(host) &&
828 		(*stop_status & R1_ERROR)) {
829 		pr_err("%s: %s: general error sending stop command, resp %#x\n",
830 			req->rq_disk->disk_name, __func__, *stop_status);
831 		*gen_err = 1;
832 	}
833 
834 	return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
835 }
836 
837 #define ERR_NOMEDIUM	3
838 #define ERR_RETRY	2
839 #define ERR_ABORT	1
840 #define ERR_CONTINUE	0
841 
842 static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
843 	bool status_valid, u32 status)
844 {
845 	switch (error) {
846 	case -EILSEQ:
847 		/* response crc error, retry the r/w cmd */
848 		pr_err("%s: %s sending %s command, card status %#x\n",
849 			req->rq_disk->disk_name, "response CRC error",
850 			name, status);
851 		return ERR_RETRY;
852 
853 	case -ETIMEDOUT:
854 		pr_err("%s: %s sending %s command, card status %#x\n",
855 			req->rq_disk->disk_name, "timed out", name, status);
856 
857 		/* If the status cmd initially failed, retry the r/w cmd */
858 		if (!status_valid)
859 			return ERR_RETRY;
860 
861 		/*
862 		 * If it was a r/w cmd crc error, or illegal command
863 		 * (eg, issued in wrong state) then retry - we should
864 		 * have corrected the state problem above.
865 		 */
866 		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
867 			return ERR_RETRY;
868 
869 		/* Otherwise abort the command */
870 		return ERR_ABORT;
871 
872 	default:
873 		/* We don't understand the error code the driver gave us */
874 		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
875 		       req->rq_disk->disk_name, error, status);
876 		return ERR_ABORT;
877 	}
878 }
879 
880 /*
881  * Initial r/w and stop cmd error recovery.
882  * We don't know whether the card received the r/w cmd or not, so try to
883  * restore things back to a sane state.  Essentially, we do this as follows:
884  * - Obtain card status.  If the first attempt to obtain card status fails,
885  *   the status word will reflect the failed status cmd, not the failed
886  *   r/w cmd.  If we fail to obtain card status, it suggests we can no
887  *   longer communicate with the card.
888  * - Check the card state.  If the card received the cmd but there was a
889  *   transient problem with the response, it might still be in a data transfer
890  *   mode.  Try to send it a stop command.  If this fails, we can't recover.
891  * - If the r/w cmd failed due to a response CRC error, it was probably
892  *   transient, so retry the cmd.
893  * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
894  * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
895  *   illegal cmd, retry.
896  * Otherwise we don't understand what happened, so abort.
897  */
898 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
899 	struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
900 {
901 	bool prev_cmd_status_valid = true;
902 	u32 status, stop_status = 0;
903 	int err, retry;
904 
905 	if (mmc_card_removed(card))
906 		return ERR_NOMEDIUM;
907 
908 	/*
909 	 * Try to get card status which indicates both the card state
910 	 * and why there was no response.  If the first attempt fails,
911 	 * we can't be sure the returned status is for the r/w command.
912 	 */
913 	for (retry = 2; retry >= 0; retry--) {
914 		err = get_card_status(card, &status, 0);
915 		if (!err)
916 			break;
917 
918 		prev_cmd_status_valid = false;
919 		pr_err("%s: error %d sending status command, %sing\n",
920 		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
921 	}
922 
923 	/* We couldn't get a response from the card.  Give up. */
924 	if (err) {
925 		/* Check if the card is removed */
926 		if (mmc_detect_card_removed(card->host))
927 			return ERR_NOMEDIUM;
928 		return ERR_ABORT;
929 	}
930 
931 	/* Flag ECC errors */
932 	if ((status & R1_CARD_ECC_FAILED) ||
933 	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
934 	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
935 		*ecc_err = 1;
936 
937 	/* Flag General errors */
938 	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
939 		if ((status & R1_ERROR) ||
940 			(brq->stop.resp[0] & R1_ERROR)) {
941 			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
942 			       req->rq_disk->disk_name, __func__,
943 			       brq->stop.resp[0], status);
944 			*gen_err = 1;
945 		}
946 
947 	/*
948 	 * Check the current card state.  If it is in some data transfer
949 	 * mode, tell it to stop (and hopefully transition back to TRAN.)
950 	 */
951 	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
952 	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
953 		err = send_stop(card,
954 			DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
955 			req, gen_err, &stop_status);
956 		if (err) {
957 			pr_err("%s: error %d sending stop command\n",
958 			       req->rq_disk->disk_name, err);
959 			/*
960 			 * If the stop cmd also timed out, the card is probably
961 			 * not present, so abort. Other errors are bad news too.
962 			 */
963 			return ERR_ABORT;
964 		}
965 
966 		if (stop_status & R1_CARD_ECC_FAILED)
967 			*ecc_err = 1;
968 	}
969 
970 	/* Check for set block count errors */
971 	if (brq->sbc.error)
972 		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
973 				prev_cmd_status_valid, status);
974 
975 	/* Check for r/w command errors */
976 	if (brq->cmd.error)
977 		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
978 				prev_cmd_status_valid, status);
979 
980 	/* Data errors */
981 	if (!brq->stop.error)
982 		return ERR_CONTINUE;
983 
984 	/* Now for stop errors.  These aren't fatal to the transfer. */
985 	pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
986 	       req->rq_disk->disk_name, brq->stop.error,
987 	       brq->cmd.resp[0], status);
988 
989 	/*
990 	 * Substitute in our own stop status as this will give the error
991 	 * state which happened during the execution of the r/w command.
992 	 */
993 	if (stop_status) {
994 		brq->stop.resp[0] = stop_status;
995 		brq->stop.error = 0;
996 	}
997 	return ERR_CONTINUE;
998 }
999 
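/*
 * Reset the card/host after a fatal error.  The reset_done bitmask ensures
 * only one reset attempt per request type (read, write, discard, secure
 * discard) until mmc_blk_reset_success() clears the bit again.  A successful
 * hardware reset leaves the card on its default partition, so the partition
 * switch is redone here as well.
 */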
1000 static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
1001 			 int type)
1002 {
1003 	int err;
1004 
1005 	if (md->reset_done & type)
1006 		return -EEXIST;
1007 
1008 	md->reset_done |= type;
1009 	err = mmc_hw_reset(host);
1010 	/* Ensure we switch back to the correct partition */
1011 	if (err != -EOPNOTSUPP) {
1012 		struct mmc_blk_data *main_md =
1013 			dev_get_drvdata(&host->card->dev);
1014 		int part_err;
1015 
1016 		main_md->part_curr = main_md->part_type;
1017 		part_err = mmc_blk_part_switch(host->card, md);
1018 		if (part_err) {
1019 			/*
1020 			 * We have failed to get back into the correct
1021 			 * partition, so we need to abort the whole request.
1022 			 */
1023 			return -ENODEV;
1024 		}
1025 	}
1026 	return err;
1027 }
1028 
1029 static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1030 {
1031 	md->reset_done &= ~type;
1032 }
1033 
1034 int mmc_access_rpmb(struct mmc_queue *mq)
1035 {
1036 	struct mmc_blk_data *md = mq->data;
1037 	/*
1038 	 * If this is an RPMB partition access, return true
1039 	 */
1040 	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1041 		return true;
1042 
1043 	return false;
1044 }
1045 
1046 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
1047 {
1048 	struct mmc_blk_data *md = mq->data;
1049 	struct mmc_card *card = md->queue.card;
1050 	unsigned int from, nr, arg;
1051 	int err = 0, type = MMC_BLK_DISCARD;
1052 
1053 	if (!mmc_can_erase(card)) {
1054 		err = -EOPNOTSUPP;
1055 		goto out;
1056 	}
1057 
1058 	from = blk_rq_pos(req);
1059 	nr = blk_rq_sectors(req);
1060 
1061 	if (mmc_can_discard(card))
1062 		arg = MMC_DISCARD_ARG;
1063 	else if (mmc_can_trim(card))
1064 		arg = MMC_TRIM_ARG;
1065 	else
1066 		arg = MMC_ERASE_ARG;
1067 retry:
1068 	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1069 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1070 				 INAND_CMD38_ARG_EXT_CSD,
1071 				 arg == MMC_TRIM_ARG ?
1072 				 INAND_CMD38_ARG_TRIM :
1073 				 INAND_CMD38_ARG_ERASE,
1074 				 0);
1075 		if (err)
1076 			goto out;
1077 	}
1078 	err = mmc_erase(card, from, nr, arg);
1079 out:
1080 	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
1081 		goto retry;
1082 	if (!err)
1083 		mmc_blk_reset_success(md, type);
1084 	blk_end_request(req, err, blk_rq_bytes(req));
1085 
1086 	return err ? 0 : 1;
1087 }
1088 
1089 static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
1090 				       struct request *req)
1091 {
1092 	struct mmc_blk_data *md = mq->data;
1093 	struct mmc_card *card = md->queue.card;
1094 	unsigned int from, nr, arg;
1095 	int err = 0, type = MMC_BLK_SECDISCARD;
1096 
1097 	if (!(mmc_can_secure_erase_trim(card))) {
1098 		err = -EOPNOTSUPP;
1099 		goto out;
1100 	}
1101 
1102 	from = blk_rq_pos(req);
1103 	nr = blk_rq_sectors(req);
1104 
1105 	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
1106 		arg = MMC_SECURE_TRIM1_ARG;
1107 	else
1108 		arg = MMC_SECURE_ERASE_ARG;
1109 
1110 retry:
1111 	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1112 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1113 				 INAND_CMD38_ARG_EXT_CSD,
1114 				 arg == MMC_SECURE_TRIM1_ARG ?
1115 				 INAND_CMD38_ARG_SECTRIM1 :
1116 				 INAND_CMD38_ARG_SECERASE,
1117 				 0);
1118 		if (err)
1119 			goto out_retry;
1120 	}
1121 
1122 	err = mmc_erase(card, from, nr, arg);
1123 	if (err == -EIO)
1124 		goto out_retry;
1125 	if (err)
1126 		goto out;
1127 
1128 	if (arg == MMC_SECURE_TRIM1_ARG) {
1129 		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1130 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1131 					 INAND_CMD38_ARG_EXT_CSD,
1132 					 INAND_CMD38_ARG_SECTRIM2,
1133 					 0);
1134 			if (err)
1135 				goto out_retry;
1136 		}
1137 
1138 		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
1139 		if (err == -EIO)
1140 			goto out_retry;
1141 		if (err)
1142 			goto out;
1143 	}
1144 
1145 out_retry:
1146 	if (err && !mmc_blk_reset(md, card->host, type))
1147 		goto retry;
1148 	if (!err)
1149 		mmc_blk_reset_success(md, type);
1150 out:
1151 	blk_end_request(req, err, blk_rq_bytes(req));
1152 
1153 	return err ? 0 : 1;
1154 }
1155 
1156 static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
1157 {
1158 	struct mmc_blk_data *md = mq->data;
1159 	struct mmc_card *card = md->queue.card;
1160 	int ret = 0;
1161 
1162 	ret = mmc_flush_cache(card);
1163 	if (ret)
1164 		ret = -EIO;
1165 
1166 	blk_end_request_all(req, ret);
1167 
1168 	return ret ? 0 : 1;
1169 }
1170 
1171 /*
1172  * Reformat current write as a reliable write, supporting
1173  * both legacy and the enhanced reliable write MMC cards.
1174  * In each transfer we'll handle only as much as a single
1175  * reliable write can handle, thus finishing the request in
1176  * partial completions.
1177  */
1178 static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
1179 				    struct mmc_card *card,
1180 				    struct request *req)
1181 {
1182 	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
1183 		/* Legacy mode imposes restrictions on transfers. */
1184 		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
1185 			brq->data.blocks = 1;
1186 
1187 		if (brq->data.blocks > card->ext_csd.rel_sectors)
1188 			brq->data.blocks = card->ext_csd.rel_sectors;
1189 		else if (brq->data.blocks < card->ext_csd.rel_sectors)
1190 			brq->data.blocks = 1;
1191 	}
1192 }
1193 
1194 #define CMD_ERRORS							\
1195 	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
1196 	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
1197 	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
1198 	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
1199 	 R1_CC_ERROR |		/* Card controller error */		\
1200 	 R1_ERROR)		/* General/unknown error */
1201 
1202 static int mmc_blk_err_check(struct mmc_card *card,
1203 			     struct mmc_async_req *areq)
1204 {
1205 	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
1206 						    mmc_active);
1207 	struct mmc_blk_request *brq = &mq_mrq->brq;
1208 	struct request *req = mq_mrq->req;
1209 	int ecc_err = 0, gen_err = 0;
1210 
1211 	/*
1212 	 * sbc.error indicates a problem with the set block count
1213 	 * command.  No data will have been transferred.
1214 	 *
1215 	 * cmd.error indicates a problem with the r/w command.  No
1216 	 * data will have been transferred.
1217 	 *
1218 	 * stop.error indicates a problem with the stop command.  Data
1219 	 * may have been transferred, or may still be transferring.
1220 	 */
1221 	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
1222 	    brq->data.error) {
1223 		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
1224 		case ERR_RETRY:
1225 			return MMC_BLK_RETRY;
1226 		case ERR_ABORT:
1227 			return MMC_BLK_ABORT;
1228 		case ERR_NOMEDIUM:
1229 			return MMC_BLK_NOMEDIUM;
1230 		case ERR_CONTINUE:
1231 			break;
1232 		}
1233 	}
1234 
1235 	/*
1236 	 * Check for errors relating to the execution of the
1237 	 * initial command - such as address errors.  No data
1238 	 * has been transferred.
1239 	 */
1240 	if (brq->cmd.resp[0] & CMD_ERRORS) {
1241 		pr_err("%s: r/w command failed, status = %#x\n",
1242 		       req->rq_disk->disk_name, brq->cmd.resp[0]);
1243 		return MMC_BLK_ABORT;
1244 	}
1245 
1246 	/*
1247 	 * Everything else is either success, or a data error of some
1248 	 * kind.  If it was a write, we may have transitioned to
1249 	 * program mode, and we have to wait for it to complete.
1250 	 */
1251 	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
1252 		int err;
1253 
1254 		/* Check stop command response */
1255 		if (brq->stop.resp[0] & R1_ERROR) {
1256 			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1257 			       req->rq_disk->disk_name, __func__,
1258 			       brq->stop.resp[0]);
1259 			gen_err = 1;
1260 		}
1261 
1262 		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
1263 					&gen_err);
1264 		if (err)
1265 			return MMC_BLK_CMD_ERR;
1266 	}
1267 
1268 	/* if general error occurs, retry the write operation. */
1269 	if (gen_err) {
1270 		pr_warn("%s: retrying write for general error\n",
1271 				req->rq_disk->disk_name);
1272 		return MMC_BLK_RETRY;
1273 	}
1274 
1275 	if (brq->data.error) {
1276 		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1277 		       req->rq_disk->disk_name, brq->data.error,
1278 		       (unsigned)blk_rq_pos(req),
1279 		       (unsigned)blk_rq_sectors(req),
1280 		       brq->cmd.resp[0], brq->stop.resp[0]);
1281 
1282 		if (rq_data_dir(req) == READ) {
1283 			if (ecc_err)
1284 				return MMC_BLK_ECC_ERR;
1285 			return MMC_BLK_DATA_ERR;
1286 		} else {
1287 			return MMC_BLK_CMD_ERR;
1288 		}
1289 	}
1290 
1291 	if (!brq->data.bytes_xfered)
1292 		return MMC_BLK_RETRY;
1293 
1294 	if (mmc_packed_cmd(mq_mrq->cmd_type)) {
1295 		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
1296 			return MMC_BLK_PARTIAL;
1297 		else
1298 			return MMC_BLK_SUCCESS;
1299 	}
1300 
1301 	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1302 		return MMC_BLK_PARTIAL;
1303 
1304 	return MMC_BLK_SUCCESS;
1305 }
1306 
1307 static int mmc_blk_packed_err_check(struct mmc_card *card,
1308 				    struct mmc_async_req *areq)
1309 {
1310 	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
1311 			mmc_active);
1312 	struct request *req = mq_rq->req;
1313 	struct mmc_packed *packed = mq_rq->packed;
1314 	int err, check, status;
1315 	u8 *ext_csd;
1316 
1317 	BUG_ON(!packed);
1318 
1319 	packed->retries--;
1320 	check = mmc_blk_err_check(card, areq);
1321 	err = get_card_status(card, &status, 0);
1322 	if (err) {
1323 		pr_err("%s: error %d sending status command\n",
1324 		       req->rq_disk->disk_name, err);
1325 		return MMC_BLK_ABORT;
1326 	}
1327 
1328 	if (status & R1_EXCEPTION_EVENT) {
1329 		err = mmc_get_ext_csd(card, &ext_csd);
1330 		if (err) {
1331 			pr_err("%s: error %d sending ext_csd\n",
1332 			       req->rq_disk->disk_name, err);
1333 			return MMC_BLK_ABORT;
1334 		}
1335 
1336 		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
1337 		     EXT_CSD_PACKED_FAILURE) &&
1338 		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1339 		     EXT_CSD_PACKED_GENERIC_ERROR)) {
1340 			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1341 			    EXT_CSD_PACKED_INDEXED_ERROR) {
1342 				packed->idx_failure =
1343 				  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
1344 				check = MMC_BLK_PARTIAL;
1345 			}
1346 			pr_err("%s: packed cmd failed, nr %u, sectors %u, "
1347 			       "failure index: %d\n",
1348 			       req->rq_disk->disk_name, packed->nr_entries,
1349 			       packed->blocks, packed->idx_failure);
1350 		}
1351 		kfree(ext_csd);
1352 	}
1353 
1354 	return check;
1355 }
1356 
1357 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1358 			       struct mmc_card *card,
1359 			       int disable_multi,
1360 			       struct mmc_queue *mq)
1361 {
1362 	u32 readcmd, writecmd;
1363 	struct mmc_blk_request *brq = &mqrq->brq;
1364 	struct request *req = mqrq->req;
1365 	struct mmc_blk_data *md = mq->data;
1366 	bool do_data_tag;
1367 
1368 	/*
1369 	 * Reliable writes are used to implement Forced Unit Access and
1370 	 * REQ_META accesses, and are supported only on MMCs.
1371 	 *
1372 	 * XXX: this really needs a good explanation of why REQ_META
1373 	 * is treated special.
1374 	 */
1375 	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
1376 			  (req->cmd_flags & REQ_META)) &&
1377 		(rq_data_dir(req) == WRITE) &&
1378 		(md->flags & MMC_BLK_REL_WR);
1379 
1380 	memset(brq, 0, sizeof(struct mmc_blk_request));
1381 	brq->mrq.cmd = &brq->cmd;
1382 	brq->mrq.data = &brq->data;
1383 
1384 	brq->cmd.arg = blk_rq_pos(req);
1385 	if (!mmc_card_blockaddr(card))
1386 		brq->cmd.arg <<= 9;
1387 	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1388 	brq->data.blksz = 512;
1389 	brq->stop.opcode = MMC_STOP_TRANSMISSION;
1390 	brq->stop.arg = 0;
1391 	brq->data.blocks = blk_rq_sectors(req);
1392 
1393 	/*
1394 	 * The block layer doesn't support all sector count
1395 	 * restrictions, so we need to be prepared for too big
1396 	 * requests.
1397 	 */
1398 	if (brq->data.blocks > card->host->max_blk_count)
1399 		brq->data.blocks = card->host->max_blk_count;
1400 
1401 	if (brq->data.blocks > 1) {
1402 		/*
1403 		 * After a read error, we redo the request one sector
1404 		 * at a time in order to accurately determine which
1405 		 * sectors can be read successfully.
1406 		 */
1407 		if (disable_multi)
1408 			brq->data.blocks = 1;
1409 
1410 		/*
1411 		 * Some controllers have HW issues while operating
1412 		 * in multiple I/O mode
1413 		 */
1414 		if (card->host->ops->multi_io_quirk)
1415 			brq->data.blocks = card->host->ops->multi_io_quirk(card,
1416 						(rq_data_dir(req) == READ) ?
1417 						MMC_DATA_READ : MMC_DATA_WRITE,
1418 						brq->data.blocks);
1419 	}
1420 
1421 	if (brq->data.blocks > 1 || do_rel_wr) {
1422 		/* SPI multiblock writes terminate using a special
1423 		 * token, not a STOP_TRANSMISSION request.
1424 		 */
1425 		if (!mmc_host_is_spi(card->host) ||
1426 		    rq_data_dir(req) == READ)
1427 			brq->mrq.stop = &brq->stop;
1428 		readcmd = MMC_READ_MULTIPLE_BLOCK;
1429 		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1430 	} else {
1431 		brq->mrq.stop = NULL;
1432 		readcmd = MMC_READ_SINGLE_BLOCK;
1433 		writecmd = MMC_WRITE_BLOCK;
1434 	}
1435 	if (rq_data_dir(req) == READ) {
1436 		brq->cmd.opcode = readcmd;
1437 		brq->data.flags |= MMC_DATA_READ;
1438 		if (brq->mrq.stop)
1439 			brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
1440 					MMC_CMD_AC;
1441 	} else {
1442 		brq->cmd.opcode = writecmd;
1443 		brq->data.flags |= MMC_DATA_WRITE;
1444 		if (brq->mrq.stop)
1445 			brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
1446 					MMC_CMD_AC;
1447 	}
1448 
1449 	if (do_rel_wr)
1450 		mmc_apply_rel_rw(brq, card, req);
1451 
1452 	/*
1453 	 * Data tag is used only during writing meta data to speed
1454 	 * up write and any subsequent read of this meta data
1455 	 */
1456 	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1457 		(req->cmd_flags & REQ_META) &&
1458 		(rq_data_dir(req) == WRITE) &&
1459 		((brq->data.blocks * brq->data.blksz) >=
1460 		 card->ext_csd.data_tag_unit_size);
1461 
1462 	/*
1463 	 * Pre-defined multi-block transfers are preferable to
1464 	 * open ended-ones (and necessary for reliable writes).
1465 	 * However, it is not sufficient to just send CMD23,
1466 	 * and avoid the final CMD12, as on an error condition
1467 	 * CMD12 (stop) needs to be sent anyway. This, coupled
1468 	 * with Auto-CMD23 enhancements provided by some
1469 	 * hosts, means that the complexity of dealing
1470 	 * with this is best left to the host. If CMD23 is
1471 	 * supported by card and host, we'll fill sbc in and let
1472 	 * the host deal with handling it correctly. This means
1473 	 * that for hosts that don't expose MMC_CAP_CMD23, no
1474 	 * change of behavior will be observed.
1475 	 *
1476 	 * N.B: Some MMC cards experience perf degradation.
1477 	 * We'll avoid using CMD23-bounded multiblock writes for
1478 	 * these, while retaining features like reliable writes.
1479 	 */
1480 	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1481 	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1482 	     do_data_tag)) {
1483 		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1484 		brq->sbc.arg = brq->data.blocks |
1485 			(do_rel_wr ? (1 << 31) : 0) |
1486 			(do_data_tag ? (1 << 29) : 0);
1487 		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1488 		brq->mrq.sbc = &brq->sbc;
1489 	}
1490 
1491 	mmc_set_data_timeout(&brq->data, card);
1492 
1493 	brq->data.sg = mqrq->sg;
1494 	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1495 
1496 	/*
1497 	 * Adjust the sg list so it is the same size as the
1498 	 * request.
1499 	 */
1500 	if (brq->data.blocks != blk_rq_sectors(req)) {
1501 		int i, data_size = brq->data.blocks << 9;
1502 		struct scatterlist *sg;
1503 
1504 		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1505 			data_size -= sg->length;
1506 			if (data_size <= 0) {
1507 				sg->length += data_size;
1508 				i++;
1509 				break;
1510 			}
1511 		}
1512 		brq->data.sg_len = i;
1513 	}
1514 
1515 	mqrq->mmc_active.mrq = &brq->mrq;
1516 	mqrq->mmc_active.err_check = mmc_blk_err_check;
1517 
1518 	mmc_queue_bounce_pre(mqrq);
1519 }
1520 
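/*
 * The packed command header occupies one "large sector" (4KB) on
 * 4KB-native cards and 512 bytes otherwise; work out how many scatterlist
 * segments that header needs given the queue's maximum segment size.
 */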
1521 static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
1522 					  struct mmc_card *card)
1523 {
1524 	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
1525 	unsigned int max_seg_sz = queue_max_segment_size(q);
1526 	unsigned int len, nr_segs = 0;
1527 
1528 	do {
1529 		len = min(hdr_sz, max_seg_sz);
1530 		hdr_sz -= len;
1531 		nr_segs++;
1532 	} while (hdr_sz);
1533 
1534 	return nr_segs;
1535 }
1536 
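/*
 * Try to collect further requests from the queue behind @req that can be
 * merged into one packed WRITE: writes only, same direction, no discard or
 * flush, bounded by the host's block-count and segment limits and the
 * card's max_packed_writes.  A fetched request that cannot be packed is
 * requeued.  Returns the number of packed entries, or 0 to fall back to a
 * normal transfer.
 */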
1537 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
1538 {
1539 	struct request_queue *q = mq->queue;
1540 	struct mmc_card *card = mq->card;
1541 	struct request *cur = req, *next = NULL;
1542 	struct mmc_blk_data *md = mq->data;
1543 	struct mmc_queue_req *mqrq = mq->mqrq_cur;
1544 	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
1545 	unsigned int req_sectors = 0, phys_segments = 0;
1546 	unsigned int max_blk_count, max_phys_segs;
1547 	bool put_back = true;
1548 	u8 max_packed_rw = 0;
1549 	u8 reqs = 0;
1550 
1551 	if (!(md->flags & MMC_BLK_PACKED_CMD))
1552 		goto no_packed;
1553 
1554 	if ((rq_data_dir(cur) == WRITE) &&
1555 	    mmc_host_packed_wr(card->host))
1556 		max_packed_rw = card->ext_csd.max_packed_writes;
1557 
1558 	if (max_packed_rw == 0)
1559 		goto no_packed;
1560 
1561 	if (mmc_req_rel_wr(cur) &&
1562 	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1563 		goto no_packed;
1564 
1565 	if (mmc_large_sector(card) &&
1566 	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
1567 		goto no_packed;
1568 
1569 	mmc_blk_clear_packed(mqrq);
1570 
1571 	max_blk_count = min(card->host->max_blk_count,
1572 			    card->host->max_req_size >> 9);
1573 	if (unlikely(max_blk_count > 0xffff))
1574 		max_blk_count = 0xffff;
1575 
1576 	max_phys_segs = queue_max_segments(q);
1577 	req_sectors += blk_rq_sectors(cur);
1578 	phys_segments += cur->nr_phys_segments;
1579 
1580 	if (rq_data_dir(cur) == WRITE) {
1581 		req_sectors += mmc_large_sector(card) ? 8 : 1;
1582 		phys_segments += mmc_calc_packed_hdr_segs(q, card);
1583 	}
1584 
1585 	do {
1586 		if (reqs >= max_packed_rw - 1) {
1587 			put_back = false;
1588 			break;
1589 		}
1590 
1591 		spin_lock_irq(q->queue_lock);
1592 		next = blk_fetch_request(q);
1593 		spin_unlock_irq(q->queue_lock);
1594 		if (!next) {
1595 			put_back = false;
1596 			break;
1597 		}
1598 
1599 		if (mmc_large_sector(card) &&
1600 		    !IS_ALIGNED(blk_rq_sectors(next), 8))
1601 			break;
1602 
1603 		if (next->cmd_flags & REQ_DISCARD ||
1604 		    next->cmd_flags & REQ_FLUSH)
1605 			break;
1606 
1607 		if (rq_data_dir(cur) != rq_data_dir(next))
1608 			break;
1609 
1610 		if (mmc_req_rel_wr(next) &&
1611 		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1612 			break;
1613 
1614 		req_sectors += blk_rq_sectors(next);
1615 		if (req_sectors > max_blk_count)
1616 			break;
1617 
1618 		phys_segments +=  next->nr_phys_segments;
1619 		if (phys_segments > max_phys_segs)
1620 			break;
1621 
1622 		list_add_tail(&next->queuelist, &mqrq->packed->list);
1623 		cur = next;
1624 		reqs++;
1625 	} while (1);
1626 
1627 	if (put_back) {
1628 		spin_lock_irq(q->queue_lock);
1629 		blk_requeue_request(q, next);
1630 		spin_unlock_irq(q->queue_lock);
1631 	}
1632 
1633 	if (reqs > 0) {
1634 		list_add(&req->queuelist, &mqrq->packed->list);
1635 		mqrq->packed->nr_entries = ++reqs;
1636 		mqrq->packed->retries = reqs;
1637 		return reqs;
1638 	}
1639 
1640 no_packed:
1641 	mqrq->cmd_type = MMC_PACKED_NONE;
1642 	return 0;
1643 }
1644 
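/*
 * Build the packed WRITE header block that precedes the data of a packed
 * transfer.  A rough sketch of the 32-bit word layout assembled below:
 *
 *	word 0:      PACKED_CMD_VER | (PACKED_CMD_WR << 8) | (nr_entries << 16)
 *	word 2n:     CMD23 argument for entry n (sector count, REL_WR/TAG bits)
 *	word 2n + 1: CMD25 argument for entry n (start address)
 *
 * The header itself accounts for hdr_blocks (8 blocks on 4KB-native cards,
 * otherwise 1) in the CMD23 of the whole packed request.
 */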
1645 static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1646 					struct mmc_card *card,
1647 					struct mmc_queue *mq)
1648 {
1649 	struct mmc_blk_request *brq = &mqrq->brq;
1650 	struct request *req = mqrq->req;
1651 	struct request *prq;
1652 	struct mmc_blk_data *md = mq->data;
1653 	struct mmc_packed *packed = mqrq->packed;
1654 	bool do_rel_wr, do_data_tag;
1655 	u32 *packed_cmd_hdr;
1656 	u8 hdr_blocks;
1657 	u8 i = 1;
1658 
1659 	BUG_ON(!packed);
1660 
1661 	mqrq->cmd_type = MMC_PACKED_WRITE;
1662 	packed->blocks = 0;
1663 	packed->idx_failure = MMC_PACKED_NR_IDX;
1664 
1665 	packed_cmd_hdr = packed->cmd_hdr;
1666 	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
1667 	packed_cmd_hdr[0] = (packed->nr_entries << 16) |
1668 		(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
1669 	hdr_blocks = mmc_large_sector(card) ? 8 : 1;
1670 
1671 	/*
1672 	 * Argument for each entry of packed group
1673 	 */
1674 	list_for_each_entry(prq, &packed->list, queuelist) {
1675 		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
1676 		do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1677 			(prq->cmd_flags & REQ_META) &&
1678 			(rq_data_dir(prq) == WRITE) &&
1679 			((brq->data.blocks * brq->data.blksz) >=
1680 			 card->ext_csd.data_tag_unit_size);
1681 		/* Argument of CMD23 */
1682 		packed_cmd_hdr[(i * 2)] =
1683 			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
1684 			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
1685 			blk_rq_sectors(prq);
1686 		/* Argument of CMD18 or CMD25 */
1687 		packed_cmd_hdr[((i * 2)) + 1] =
1688 			mmc_card_blockaddr(card) ?
1689 			blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
1690 		packed->blocks += blk_rq_sectors(prq);
1691 		i++;
1692 	}
1693 
1694 	memset(brq, 0, sizeof(struct mmc_blk_request));
1695 	brq->mrq.cmd = &brq->cmd;
1696 	brq->mrq.data = &brq->data;
1697 	brq->mrq.sbc = &brq->sbc;
1698 	brq->mrq.stop = &brq->stop;
1699 
1700 	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1701 	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
1702 	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1703 
1704 	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
1705 	brq->cmd.arg = blk_rq_pos(req);
1706 	if (!mmc_card_blockaddr(card))
1707 		brq->cmd.arg <<= 9;
1708 	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1709 
1710 	brq->data.blksz = 512;
1711 	brq->data.blocks = packed->blocks + hdr_blocks;
1712 	brq->data.flags |= MMC_DATA_WRITE;
1713 
1714 	brq->stop.opcode = MMC_STOP_TRANSMISSION;
1715 	brq->stop.arg = 0;
1716 	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1717 
1718 	mmc_set_data_timeout(&brq->data, card);
1719 
1720 	brq->data.sg = mqrq->sg;
1721 	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1722 
1723 	mqrq->mmc_active.mrq = &brq->mrq;
1724 	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
1725 
1726 	mmc_queue_bounce_pre(mqrq);
1727 }
1728 
1729 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1730 			   struct mmc_blk_request *brq, struct request *req,
1731 			   int ret)
1732 {
1733 	struct mmc_queue_req *mq_rq;
1734 	mq_rq = container_of(brq, struct mmc_queue_req, brq);
1735 
1736 	/*
1737 	 * If this is an SD card and we're writing, we can first
1738 	 * mark the known good sectors as ok.
1739 	 *
1740 	 * If the card is not SD, we can still ok written sectors
1741 	 * as reported by the controller (which might be less than
1742 	 * the real number of written sectors, but never more).
1743 	 */
1744 	if (mmc_card_sd(card)) {
1745 		u32 blocks;
1746 
1747 		blocks = mmc_sd_num_wr_blocks(card);
1748 		if (blocks != (u32)-1) {
1749 			ret = blk_end_request(req, 0, blocks << 9);
1750 		}
1751 	} else {
1752 		if (!mmc_packed_cmd(mq_rq->cmd_type))
1753 			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
1754 	}
1755 	return ret;
1756 }
1757 
1758 static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
1759 {
1760 	struct request *prq;
1761 	struct mmc_packed *packed = mq_rq->packed;
1762 	int idx = packed->idx_failure, i = 0;
1763 	int ret = 0;
1764 
1765 	BUG_ON(!packed);
1766 
1767 	while (!list_empty(&packed->list)) {
1768 		prq = list_entry_rq(packed->list.next);
1769 		if (idx == i) {
1770 			/* retry from error index */
1771 			packed->nr_entries -= idx;
1772 			mq_rq->req = prq;
1773 			ret = 1;
1774 
1775 			if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
1776 				list_del_init(&prq->queuelist);
1777 				mmc_blk_clear_packed(mq_rq);
1778 			}
1779 			return ret;
1780 		}
1781 		list_del_init(&prq->queuelist);
1782 		blk_end_request(prq, 0, blk_rq_bytes(prq));
1783 		i++;
1784 	}
1785 
1786 	mmc_blk_clear_packed(mq_rq);
1787 	return ret;
1788 }
1789 
1790 static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
1791 {
1792 	struct request *prq;
1793 	struct mmc_packed *packed = mq_rq->packed;
1794 
1795 	BUG_ON(!packed);
1796 
1797 	while (!list_empty(&packed->list)) {
1798 		prq = list_entry_rq(packed->list.next);
1799 		list_del_init(&prq->queuelist);
1800 		blk_end_request(prq, -EIO, blk_rq_bytes(prq));
1801 	}
1802 
1803 	mmc_blk_clear_packed(mq_rq);
1804 }
1805 
1806 static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
1807 				      struct mmc_queue_req *mq_rq)
1808 {
1809 	struct request *prq;
1810 	struct request_queue *q = mq->queue;
1811 	struct mmc_packed *packed = mq_rq->packed;
1812 
1813 	BUG_ON(!packed);
1814 
1815 	while (!list_empty(&packed->list)) {
1816 		prq = list_entry_rq(packed->list.prev);
1817 		if (prq->queuelist.prev != &packed->list) {
1818 			list_del_init(&prq->queuelist);
1819 			spin_lock_irq(q->queue_lock);
1820 			blk_requeue_request(mq->queue, prq);
1821 			spin_unlock_irq(q->queue_lock);
1822 		} else {
1823 			list_del_init(&prq->queuelist);
1824 		}
1825 	}
1826 
1827 	mmc_blk_clear_packed(mq_rq);
1828 }
1829 
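/*
 * Main read/write issue path.  The next request is prepared (packed if
 * enough requests were collected) and started asynchronously while the
 * previously started request is completed, with retries, reset and
 * single-block fallback on errors.
 */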
1830 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1831 {
1832 	struct mmc_blk_data *md = mq->data;
1833 	struct mmc_card *card = md->queue.card;
1834 	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
1835 	int ret = 1, disable_multi = 0, retry = 0, type;
1836 	enum mmc_blk_status status;
1837 	struct mmc_queue_req *mq_rq;
1838 	struct request *req = rqc;
1839 	struct mmc_async_req *areq;
1840 	const u8 packed_nr = 2;
1841 	u8 reqs = 0;
1842 
1843 	if (!rqc && !mq->mqrq_prev->req)
1844 		return 0;
1845 
1846 	if (rqc)
1847 		reqs = mmc_blk_prep_packed_list(mq, rqc);
1848 
1849 	do {
1850 		if (rqc) {
1851 			/*
1852 			 * When a 4KB native sector size is enabled, only
1853 			 * multiples of 8 blocks may be read or written
1854 			 */
1855 			if ((brq->data.blocks & 0x07) &&
1856 			    (card->ext_csd.data_sector_size == 4096)) {
1857 				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
1858 					req->rq_disk->disk_name);
1859 				mq_rq = mq->mqrq_cur;
1860 				goto cmd_abort;
1861 			}
1862 
1863 			if (reqs >= packed_nr)
1864 				mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
1865 							    card, mq);
1866 			else
1867 				mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1868 			areq = &mq->mqrq_cur->mmc_active;
1869 		} else
1870 			areq = NULL;
1871 		areq = mmc_start_req(card->host, areq, (int *) &status);
1872 		if (!areq) {
1873 			if (status == MMC_BLK_NEW_REQUEST)
1874 				mq->flags |= MMC_QUEUE_NEW_REQUEST;
1875 			return 0;
1876 		}
1877 
1878 		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
1879 		brq = &mq_rq->brq;
1880 		req = mq_rq->req;
1881 		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
1882 		mmc_queue_bounce_post(mq_rq);
1883 
1884 		switch (status) {
1885 		case MMC_BLK_SUCCESS:
1886 		case MMC_BLK_PARTIAL:
1887 			/*
1888 			 * A block was successfully transferred.
1889 			 */
1890 			mmc_blk_reset_success(md, type);
1891 
1892 			if (mmc_packed_cmd(mq_rq->cmd_type)) {
1893 				ret = mmc_blk_end_packed_req(mq_rq);
1894 				break;
1895 			} else {
1896 				ret = blk_end_request(req, 0,
1897 						brq->data.bytes_xfered);
1898 			}
1899 
1900 			/*
1901 			 * If the blk_end_request function returns non-zero even
1902 			 * though all data has been transferred and no errors
1903 			 * were returned by the host controller, it's a bug.
1904 			 */
1905 			if (status == MMC_BLK_SUCCESS && ret) {
1906 				pr_err("%s BUG rq_tot %d d_xfer %d\n",
1907 				       __func__, blk_rq_bytes(req),
1908 				       brq->data.bytes_xfered);
1909 				rqc = NULL;
1910 				goto cmd_abort;
1911 			}
1912 			break;
1913 		case MMC_BLK_CMD_ERR:
1914 			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
1915 			if (mmc_blk_reset(md, card->host, type))
1916 				goto cmd_abort;
1917 			if (!ret)
1918 				goto start_new_req;
1919 			break;
1920 		case MMC_BLK_RETRY:
1921 			if (retry++ < 5)
1922 				break;
1923 			/* Fall through */
1924 		case MMC_BLK_ABORT:
1925 			if (!mmc_blk_reset(md, card->host, type))
1926 				break;
1927 			goto cmd_abort;
1928 		case MMC_BLK_DATA_ERR: {
1929 			int err;
1930 
1931 			err = mmc_blk_reset(md, card->host, type);
1932 			if (!err)
1933 				break;
1934 			if (err == -ENODEV ||
1935 				mmc_packed_cmd(mq_rq->cmd_type))
1936 				goto cmd_abort;
1937 			/* Fall through */
1938 		}
1939 		case MMC_BLK_ECC_ERR:
1940 			if (brq->data.blocks > 1) {
1941 				/* Redo read one sector at a time */
1942 				pr_warn("%s: retrying using single block read\n",
1943 					req->rq_disk->disk_name);
1944 				disable_multi = 1;
1945 				break;
1946 			}
1947 			/*
1948 			 * After an error, we redo I/O one sector at a
1949 			 * time, so we only reach here after trying to
1950 			 * read a single sector.
1951 			 */
1952 			ret = blk_end_request(req, -EIO,
1953 						brq->data.blksz);
1954 			if (!ret)
1955 				goto start_new_req;
1956 			break;
1957 		case MMC_BLK_NOMEDIUM:
1958 			goto cmd_abort;
1959 		default:
1960 			pr_err("%s: Unhandled return value (%d)\n",
1961 					req->rq_disk->disk_name, status);
1962 			goto cmd_abort;
1963 		}
1964 
1965 		if (ret) {
1966 			if (mmc_packed_cmd(mq_rq->cmd_type)) {
1967 				if (!mq_rq->packed->retries)
1968 					goto cmd_abort;
1969 				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
1970 				mmc_start_req(card->host,
1971 					      &mq_rq->mmc_active, NULL);
1972 			} else {
1973 
1974 				/*
1975 				 * In case of an incomplete request,
1976 				 * prepare it again and resend.
1977 				 */
1978 				mmc_blk_rw_rq_prep(mq_rq, card,
1979 						disable_multi, mq);
1980 				mmc_start_req(card->host,
1981 						&mq_rq->mmc_active, NULL);
1982 			}
1983 		}
1984 	} while (ret);
1985 
1986 	return 1;
1987 
1988  cmd_abort:
1989 	if (mmc_packed_cmd(mq_rq->cmd_type)) {
1990 		mmc_blk_abort_packed_req(mq_rq);
1991 	} else {
1992 		if (mmc_card_removed(card))
1993 			req->cmd_flags |= REQ_QUIET;
1994 		while (ret)
1995 			ret = blk_end_request(req, -EIO,
1996 					blk_rq_cur_bytes(req));
1997 	}
1998 
1999  start_new_req:
2000 	if (rqc) {
2001 		if (mmc_card_removed(card)) {
2002 			rqc->cmd_flags |= REQ_QUIET;
2003 			blk_end_request_all(rqc, -EIO);
2004 		} else {
2005 			/*
2006 			 * If the current request is packed, it needs to be put back.
2007 			 */
2008 			if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
2009 				mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
2010 
2011 			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2012 			mmc_start_req(card->host,
2013 				      &mq->mqrq_cur->mmc_active, NULL);
2014 		}
2015 	}
2016 
2017 	return 0;
2018 }
2019 
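/*
 * Entry point called from the queue thread for every request.  Claims the
 * host for the first request in a burst, switches to the addressed
 * partition and dispatches to the discard, secure discard, flush or
 * read/write handlers.
 */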
2020 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2021 {
2022 	int ret;
2023 	struct mmc_blk_data *md = mq->data;
2024 	struct mmc_card *card = md->queue.card;
2025 	struct mmc_host *host = card->host;
2026 	unsigned long flags;
2027 	unsigned int cmd_flags = req ? req->cmd_flags : 0;
2028 
2029 	if (req && !mq->mqrq_prev->req)
2030 		/* claim host only for the first request */
2031 		mmc_get_card(card);
2032 
2033 	ret = mmc_blk_part_switch(card, md);
2034 	if (ret) {
2035 		if (req) {
2036 			blk_end_request_all(req, -EIO);
2037 		}
2038 		ret = 0;
2039 		goto out;
2040 	}
2041 
2042 	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
2043 	if (cmd_flags & REQ_DISCARD) {
2044 		/* complete ongoing async transfer before issuing discard */
2045 		if (card->host->areq)
2046 			mmc_blk_issue_rw_rq(mq, NULL);
2047 		if (req->cmd_flags & REQ_SECURE)
2048 			ret = mmc_blk_issue_secdiscard_rq(mq, req);
2049 		else
2050 			ret = mmc_blk_issue_discard_rq(mq, req);
2051 	} else if (cmd_flags & REQ_FLUSH) {
2052 		/* complete ongoing async transfer before issuing flush */
2053 		if (card->host->areq)
2054 			mmc_blk_issue_rw_rq(mq, NULL);
2055 		ret = mmc_blk_issue_flush(mq, req);
2056 	} else {
2057 		if (!req && host->areq) {
2058 			spin_lock_irqsave(&host->context_info.lock, flags);
2059 			host->context_info.is_waiting_last_req = true;
2060 			spin_unlock_irqrestore(&host->context_info.lock, flags);
2061 		}
2062 		ret = mmc_blk_issue_rw_rq(mq, req);
2063 	}
2064 
2065 out:
2066 	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
2067 	     (cmd_flags & MMC_REQ_SPECIAL_MASK))
2068 		/*
2069 		 * Release the host when there are no more requests
2070 		 * and after a special request (discard, flush) is done.
2071 		 * In the case of a special request there is no reentry
2072 		 * into 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
2073 		 */
2074 		mmc_put_card(card);
2075 	return ret;
2076 }
2077 
2078 static inline int mmc_blk_readonly(struct mmc_card *card)
2079 {
2080 	return mmc_card_readonly(card) ||
2081 	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2082 }
2083 
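/*
 * Allocate the mmc_blk_data, gendisk and request queue for one card data
 * area (main area, boot, RPMB or general purpose partition).
 */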
2084 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2085 					      struct device *parent,
2086 					      sector_t size,
2087 					      bool default_ro,
2088 					      const char *subname,
2089 					      int area_type)
2090 {
2091 	struct mmc_blk_data *md;
2092 	int devidx, ret;
2093 
2094 	devidx = find_first_zero_bit(dev_use, max_devices);
2095 	if (devidx >= max_devices)
2096 		return ERR_PTR(-ENOSPC);
2097 	__set_bit(devidx, dev_use);
2098 
2099 	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
2100 	if (!md) {
2101 		ret = -ENOMEM;
2102 		goto out;
2103 	}
2104 
2105 	/*
2106 	 * !subname implies we are creating main mmc_blk_data that will be
2107 	 * associated with mmc_card with dev_set_drvdata. Due to device
2108 	 * partitions, devidx will not coincide with a per-physical card
2109 	 * index anymore, so we keep track of a name index.
2110 	 */
2111 	if (!subname) {
2112 		md->name_idx = find_first_zero_bit(name_use, max_devices);
2113 		__set_bit(md->name_idx, name_use);
2114 	} else
2115 		md->name_idx = ((struct mmc_blk_data *)
2116 				dev_to_disk(parent)->private_data)->name_idx;
2117 
2118 	md->area_type = area_type;
2119 
2120 	/*
2121 	 * Set the read-only status based on the supported commands
2122 	 * and the write protect switch.
2123 	 */
2124 	md->read_only = mmc_blk_readonly(card);
2125 
2126 	md->disk = alloc_disk(perdev_minors);
2127 	if (md->disk == NULL) {
2128 		ret = -ENOMEM;
2129 		goto err_kfree;
2130 	}
2131 
2132 	spin_lock_init(&md->lock);
2133 	INIT_LIST_HEAD(&md->part);
2134 	md->usage = 1;
2135 
2136 	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
2137 	if (ret)
2138 		goto err_putdisk;
2139 
2140 	md->queue.issue_fn = mmc_blk_issue_rq;
2141 	md->queue.data = md;
2142 
2143 	md->disk->major	= MMC_BLOCK_MAJOR;
2144 	md->disk->first_minor = devidx * perdev_minors;
2145 	md->disk->fops = &mmc_bdops;
2146 	md->disk->private_data = md;
2147 	md->disk->queue = md->queue.queue;
2148 	md->disk->driverfs_dev = parent;
2149 	set_disk_ro(md->disk, md->read_only || default_ro);
2150 	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
2151 		md->disk->flags |= GENHD_FL_NO_PART_SCAN;
2152 
2153 	/*
2154 	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
2155 	 *
2156 	 * - be set for removable media with permanent block devices
2157 	 * - be unset for removable block devices with permanent media
2158 	 *
2159 	 * Since MMC block devices clearly fall under the second
2160 	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
2161 	 * should use the block device creation/destruction hotplug
2162 	 * messages to tell when the card is present.
2163 	 */
2164 
2165 	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2166 		 "mmcblk%u%s", md->name_idx, subname ? subname : "");
2167 
2168 	if (mmc_card_mmc(card))
2169 		blk_queue_logical_block_size(md->queue.queue,
2170 					     card->ext_csd.data_sector_size);
2171 	else
2172 		blk_queue_logical_block_size(md->queue.queue, 512);
2173 
2174 	set_capacity(md->disk, size);
2175 
2176 	if (mmc_host_cmd23(card->host)) {
2177 		if (mmc_card_mmc(card) ||
2178 		    (mmc_card_sd(card) &&
2179 		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2180 			md->flags |= MMC_BLK_CMD23;
2181 	}
2182 
2183 	if (mmc_card_mmc(card) &&
2184 	    md->flags & MMC_BLK_CMD23 &&
2185 	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2186 	     card->ext_csd.rel_sectors)) {
2187 		md->flags |= MMC_BLK_REL_WR;
2188 		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
2189 	}
2190 
2191 	if (mmc_card_mmc(card) &&
2192 	    (area_type == MMC_BLK_DATA_AREA_MAIN) &&
2193 	    (md->flags & MMC_BLK_CMD23) &&
2194 	    card->ext_csd.packed_event_en) {
2195 		if (!mmc_packed_init(&md->queue, card))
2196 			md->flags |= MMC_BLK_PACKED_CMD;
2197 	}
2198 
2199 	return md;
2200 
2201  err_putdisk:
2202 	put_disk(md->disk);
2203  err_kfree:
2204 	kfree(md);
2205  out:
2206 	return ERR_PTR(ret);
2207 }
2208 
2209 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2210 {
2211 	sector_t size;
2212 
2213 	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2214 		/*
2215 		 * The EXT_CSD sector count is in units of 512-byte
2216 		 * sectors.
2217 		 */
2218 		size = card->ext_csd.sectors;
2219 	} else {
2220 		/*
2221 		 * The CSD capacity field is in units of read_blkbits.
2222 		 * set_capacity takes units of 512 bytes.
2223 		 */
2224 		size = card->csd.capacity << (card->csd.read_blkbits - 9);
2225 	}
2226 
2227 	return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
2228 					MMC_BLK_DATA_AREA_MAIN);
2229 }
2230 
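/*
 * Allocate a block device for a single hardware partition and link it into
 * the main device's partition list.
 */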
2231 static int mmc_blk_alloc_part(struct mmc_card *card,
2232 			      struct mmc_blk_data *md,
2233 			      unsigned int part_type,
2234 			      sector_t size,
2235 			      bool default_ro,
2236 			      const char *subname,
2237 			      int area_type)
2238 {
2239 	char cap_str[10];
2240 	struct mmc_blk_data *part_md;
2241 
2242 	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
2243 				    subname, area_type);
2244 	if (IS_ERR(part_md))
2245 		return PTR_ERR(part_md);
2246 	part_md->part_type = part_type;
2247 	list_add(&part_md->part, &md->part);
2248 
2249 	string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
2250 			cap_str, sizeof(cap_str));
2251 	pr_info("%s: %s %s partition %u %s\n",
2252 	       part_md->disk->disk_name, mmc_card_id(card),
2253 	       mmc_card_name(card), part_md->part_type, cap_str);
2254 	return 0;
2255 }
2256 
2257 /* MMC Physical partitions consist of two boot partitions and
2258  * up to four general purpose partitions.
2259  * For each partition enabled in EXT_CSD a block device will be allocated
2260  * to provide access to the partition.
2261  */
2262 
2263 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2264 {
2265 	int idx, ret = 0;
2266 
2267 	if (!mmc_card_mmc(card))
2268 		return 0;
2269 
2270 	for (idx = 0; idx < card->nr_parts; idx++) {
2271 		if (card->part[idx].size) {
2272 			ret = mmc_blk_alloc_part(card, md,
2273 				card->part[idx].part_cfg,
2274 				card->part[idx].size >> 9,
2275 				card->part[idx].force_ro,
2276 				card->part[idx].name,
2277 				card->part[idx].area_type);
2278 			if (ret)
2279 				return ret;
2280 		}
2281 	}
2282 
2283 	return ret;
2284 }
2285 
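/*
 * Tear down one block device: clean up the queue (which stops new requests
 * from being accepted), remove its sysfs attributes and gendisk, and drop
 * the usage reference.
 */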
2286 static void mmc_blk_remove_req(struct mmc_blk_data *md)
2287 {
2288 	struct mmc_card *card;
2289 
2290 	if (md) {
2291 		/*
2292 		 * Flush remaining requests and free queues. It
2293 		 * is freeing the queue that stops new requests
2294 		 * from being accepted.
2295 		 */
2296 		card = md->queue.card;
2297 		mmc_cleanup_queue(&md->queue);
2298 		if (md->flags & MMC_BLK_PACKED_CMD)
2299 			mmc_packed_clean(&md->queue);
2300 		if (md->disk->flags & GENHD_FL_UP) {
2301 			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2302 			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2303 					card->ext_csd.boot_ro_lockable)
2304 				device_remove_file(disk_to_dev(md->disk),
2305 					&md->power_ro_lock);
2306 
2307 			del_gendisk(md->disk);
2308 		}
2309 		mmc_blk_put(md);
2310 	}
2311 }
2312 
2313 static void mmc_blk_remove_parts(struct mmc_card *card,
2314 				 struct mmc_blk_data *md)
2315 {
2316 	struct list_head *pos, *q;
2317 	struct mmc_blk_data *part_md;
2318 
2319 	__clear_bit(md->name_idx, name_use);
2320 	list_for_each_safe(pos, q, &md->part) {
2321 		part_md = list_entry(pos, struct mmc_blk_data, part);
2322 		list_del(pos);
2323 		mmc_blk_remove_req(part_md);
2324 	}
2325 }
2326 
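/*
 * Register the gendisk and create the "force_ro" sysfs attribute, plus
 * "ro_lock_until_next_power_on" for boot areas that support power-on
 * write protection.
 */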
2327 static int mmc_add_disk(struct mmc_blk_data *md)
2328 {
2329 	int ret;
2330 	struct mmc_card *card = md->queue.card;
2331 
2332 	add_disk(md->disk);
2333 	md->force_ro.show = force_ro_show;
2334 	md->force_ro.store = force_ro_store;
2335 	sysfs_attr_init(&md->force_ro.attr);
2336 	md->force_ro.attr.name = "force_ro";
2337 	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2338 	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2339 	if (ret)
2340 		goto force_ro_fail;
2341 
2342 	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2343 	     card->ext_csd.boot_ro_lockable) {
2344 		umode_t mode;
2345 
2346 		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2347 			mode = S_IRUGO;
2348 		else
2349 			mode = S_IRUGO | S_IWUSR;
2350 
2351 		md->power_ro_lock.show = power_ro_lock_show;
2352 		md->power_ro_lock.store = power_ro_lock_store;
2353 		sysfs_attr_init(&md->power_ro_lock.attr);
2354 		md->power_ro_lock.attr.mode = mode;
2355 		md->power_ro_lock.attr.name =
2356 					"ro_lock_until_next_power_on";
2357 		ret = device_create_file(disk_to_dev(md->disk),
2358 				&md->power_ro_lock);
2359 		if (ret)
2360 			goto power_ro_lock_fail;
2361 	}
2362 	return ret;
2363 
2364 power_ro_lock_fail:
2365 	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2366 force_ro_fail:
2367 	del_gendisk(md->disk);
2368 
2369 	return ret;
2370 }
2371 
2372 #define CID_MANFID_SANDISK	0x2
2373 #define CID_MANFID_TOSHIBA	0x11
2374 #define CID_MANFID_MICRON	0x13
2375 #define CID_MANFID_SAMSUNG	0x15
2376 
2377 static const struct mmc_fixup blk_fixups[] =
2378 {
2379 	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
2380 		  MMC_QUIRK_INAND_CMD38),
2381 	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
2382 		  MMC_QUIRK_INAND_CMD38),
2383 	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
2384 		  MMC_QUIRK_INAND_CMD38),
2385 	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
2386 		  MMC_QUIRK_INAND_CMD38),
2387 	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
2388 		  MMC_QUIRK_INAND_CMD38),
2389 
2390 	/*
2391 	 * Some MMC cards experience performance degradation with CMD23
2392 	 * instead of CMD12-bounded multiblock transfers. For now we'll
2393 	 * black list what's bad...
2394 	 * - Certain Toshiba cards.
2395 	 *
2396 	 * N.B. This doesn't affect SD cards.
2397 	 */
2398 	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2399 		  MMC_QUIRK_BLK_NO_CMD23),
2400 	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2401 		  MMC_QUIRK_BLK_NO_CMD23),
2402 	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2403 		  MMC_QUIRK_BLK_NO_CMD23),
2404 
2405 	/*
2406 	 * Some MMC cards need longer data read timeout than indicated in CSD.
2407 	 */
2408 	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
2409 		  MMC_QUIRK_LONG_READ_TIME),
2410 	MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2411 		  MMC_QUIRK_LONG_READ_TIME),
2412 
2413 	/*
2414 	 * On these Samsung MoviNAND parts, performing secure erase or
2415 	 * secure trim can result in unrecoverable corruption due to a
2416 	 * firmware bug.
2417 	 */
2418 	MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2419 		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2420 	MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2421 		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2422 	MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2423 		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2424 	MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2425 		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2426 	MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2427 		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2428 	MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2429 		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2430 	MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2431 		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2432 	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2433 		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2434 
2435 	END_FIXUP
2436 };
2437 
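/*
 * Probe: allocate block devices for the card and its hardware partitions,
 * register them and enable runtime PM (except for SD-combo cards, where
 * that decision is left to the SDIO init sequence).
 */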
2438 static int mmc_blk_probe(struct mmc_card *card)
2439 {
2440 	struct mmc_blk_data *md, *part_md;
2441 	char cap_str[10];
2442 
2443 	/*
2444 	 * Check that the card supports the command class(es) we need.
2445 	 */
2446 	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
2447 		return -ENODEV;
2448 
2449 	mmc_fixup_device(card, blk_fixups);
2450 
2451 	md = mmc_blk_alloc(card);
2452 	if (IS_ERR(md))
2453 		return PTR_ERR(md);
2454 
2455 	string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
2456 			cap_str, sizeof(cap_str));
2457 	pr_info("%s: %s %s %s %s\n",
2458 		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
2459 		cap_str, md->read_only ? "(ro)" : "");
2460 
2461 	if (mmc_blk_alloc_parts(card, md))
2462 		goto out;
2463 
2464 	dev_set_drvdata(&card->dev, md);
2465 
2466 	if (mmc_add_disk(md))
2467 		goto out;
2468 
2469 	list_for_each_entry(part_md, &md->part, part) {
2470 		if (mmc_add_disk(part_md))
2471 			goto out;
2472 	}
2473 
2474 	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
2475 	pm_runtime_use_autosuspend(&card->dev);
2476 
2477 	/*
2478 	 * Don't enable runtime PM for SD-combo cards here. Leave that
2479 	 * decision to be taken during the SDIO init sequence instead.
2480 	 */
2481 	if (card->type != MMC_TYPE_SD_COMBO) {
2482 		pm_runtime_set_active(&card->dev);
2483 		pm_runtime_enable(&card->dev);
2484 	}
2485 
2486 	return 0;
2487 
2488  out:
2489 	mmc_blk_remove_parts(card, md);
2490 	mmc_blk_remove_req(md);
2491 	return 0;
2492 }
2493 
2494 static void mmc_blk_remove(struct mmc_card *card)
2495 {
2496 	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2497 
2498 	mmc_blk_remove_parts(card, md);
2499 	pm_runtime_get_sync(&card->dev);
2500 	mmc_claim_host(card->host);
2501 	mmc_blk_part_switch(card, md);
2502 	mmc_release_host(card->host);
2503 	if (card->type != MMC_TYPE_SD_COMBO)
2504 		pm_runtime_disable(&card->dev);
2505 	pm_runtime_put_noidle(&card->dev);
2506 	mmc_blk_remove_req(md);
2507 	dev_set_drvdata(&card->dev, NULL);
2508 }
2509 
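/* Suspend all request queues; also used from the shutdown handler. */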
2510 static int _mmc_blk_suspend(struct mmc_card *card)
2511 {
2512 	struct mmc_blk_data *part_md;
2513 	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2514 
2515 	if (md) {
2516 		mmc_queue_suspend(&md->queue);
2517 		list_for_each_entry(part_md, &md->part, part) {
2518 			mmc_queue_suspend(&part_md->queue);
2519 		}
2520 	}
2521 	return 0;
2522 }
2523 
2524 static void mmc_blk_shutdown(struct mmc_card *card)
2525 {
2526 	_mmc_blk_suspend(card);
2527 }
2528 
2529 #ifdef CONFIG_PM_SLEEP
2530 static int mmc_blk_suspend(struct device *dev)
2531 {
2532 	struct mmc_card *card = mmc_dev_to_card(dev);
2533 
2534 	return _mmc_blk_suspend(card);
2535 }
2536 
2537 static int mmc_blk_resume(struct device *dev)
2538 {
2539 	struct mmc_blk_data *part_md;
2540 	struct mmc_blk_data *md = dev_get_drvdata(dev);
2541 
2542 	if (md) {
2543 		/*
2544 		 * Resume involves the card going into idle state,
2545 		 * so current partition is always the main one.
2546 		 */
2547 		md->part_curr = md->part_type;
2548 		mmc_queue_resume(&md->queue);
2549 		list_for_each_entry(part_md, &md->part, part) {
2550 			mmc_queue_resume(&part_md->queue);
2551 		}
2552 	}
2553 	return 0;
2554 }
2555 #endif
2556 
2557 static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
2558 
2559 static struct mmc_driver mmc_driver = {
2560 	.drv		= {
2561 		.name	= "mmcblk",
2562 		.pm	= &mmc_blk_pm_ops,
2563 	},
2564 	.probe		= mmc_blk_probe,
2565 	.remove		= mmc_blk_remove,
2566 	.shutdown	= mmc_blk_shutdown,
2567 };
2568 
2569 static int __init mmc_blk_init(void)
2570 {
2571 	int res;
2572 
2573 	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
2574 		pr_info("mmcblk: using %d minors per device\n", perdev_minors);
2575 
2576 	max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
2577 
2578 	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
2579 	if (res)
2580 		goto out;
2581 
2582 	res = mmc_register_driver(&mmc_driver);
2583 	if (res)
2584 		goto out2;
2585 
2586 	return 0;
2587  out2:
2588 	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
2589  out:
2590 	return res;
2591 }
2592 
2593 static void __exit mmc_blk_exit(void)
2594 {
2595 	mmc_unregister_driver(&mmc_driver);
2596 	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
2597 }
2598 
2599 module_init(mmc_blk_init);
2600 module_exit(mmc_blk_exit);
2601 
2602 MODULE_LICENSE("GPL");
2603 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
2604 
2605