/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
#include "pwrseq.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

/*
 * Background operations can take a long time, depending on the housekeeping
 * operations the card has to perform.
 */
#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */

static struct workqueue_struct *workqueue;
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}

#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		if (mrq->sbc) {
			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->sbc->opcode,
				mrq->sbc->error,
				mrq->sbc->resp[0], mrq->sbc->resp[1],
				mrq->sbc->resp[2], mrq->sbc->resp[3]);
		}

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_release(host);
	}
}

EXPORT_SYMBOL(mmc_request_done);
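
/*
 * Illustrative usage sketch (not part of this file; the driver and
 * field names such as struct foo_host are hypothetical): a host
 * controller driver typically calls mmc_request_done() from its
 * interrupt handler once the hardware has finished the transfer, e.g.:
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct foo_host *foo = dev_id;
 *		struct mmc_request *mrq = foo->mrq;
 *
 *		foo->mrq = NULL;
 *		mrq->cmd->error = foo_decode_status(foo);
 *		mmc_request_done(foo->mmc, mrq);
 *		return IRQ_HANDLED;
 *	}
 */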

static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif
	if (mmc_card_removed(host->card))
		return -ENOMEDIUM;

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->sbc) {
		mrq->sbc->error = 0;
		mrq->sbc->mrq = mrq;
	}
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);
	host->ops->request(host, mrq);

	return 0;
}

/**
 *	mmc_start_bkops - start BKOPS for supported cards
 *	@card: MMC card to start BKOPS
 *	@from_exception: A flag to indicate if this function was
 *			 called due to an exception raised by the card
 *
 *	Start background operations whenever requested.
 *	When the urgent BKOPS bit is set in a R1 command response
 *	then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	BUG_ON(!card);

	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_BKOPS_MAX_TIMEOUT;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout,
			use_busy_signal, true, false);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		goto out;
	}

	/*
	 * For urgent BKOPS status (LEVEL_2 and higher) BKOPS is executed
	 * synchronously; otherwise the operation is left running and the
	 * card is flagged as busy doing BKOPS.
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
out:
	mmc_release_host(card->host);
}
EXPORT_SYMBOL(mmc_start_bkops);
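
/*
 * Illustrative call-site sketch (hypothetical context, for orientation
 * only): the block driver may kick background operations when an R1
 * response carries the exception bit:
 *
 *	if (brq->cmd.resp[0] & R1_EXCEPTION_EVENT)
 *		mmc_start_bkops(card, true);
 *
 * where brq would be the block driver's request context.
 */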

/*
 * mmc_wait_data_done() - done callback for data request
 * @mrq: done data request
 *
 * Wakes up mmc context, passed as a callback to host controller driver
 */
static void mmc_wait_data_done(struct mmc_request *mrq)
{
	struct mmc_context_info *context_info = &mrq->host->context_info;

	context_info->is_done_rcv = true;
	wake_up_interruptible(&context_info->wait);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

/*
 * __mmc_start_data_req() - starts data request
 * @host: MMC host to start the request
 * @mrq: data request to start
 *
 * Sets the done callback to be called when request is completed by the card.
 * Starts data mmc request execution
 */
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mrq->done = mmc_wait_data_done;
	mrq->host = host;

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		mmc_wait_data_done(mrq);
	}

	return err;
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		complete(&mrq->completion);
	}

	return err;
}

/*
 * mmc_wait_for_data_req_done() - wait for request completed
 * @host: MMC host to prepare the command.
 * @mrq: MMC request to wait for
 *
 * Blocks the MMC context until the host controller acknowledges the end of
 * data request execution or a new request notification arrives from the
 * block layer. Handles command retries.
 *
 * Returns enum mmc_blk_status after checking errors.
 */
static int mmc_wait_for_data_req_done(struct mmc_host *host,
				      struct mmc_request *mrq,
				      struct mmc_async_req *next_req)
{
	struct mmc_command *cmd;
	struct mmc_context_info *context_info = &host->context_info;
	int err;
	unsigned long flags;

	while (1) {
		wait_event_interruptible(context_info->wait,
				(context_info->is_done_rcv ||
				 context_info->is_new_req));
		spin_lock_irqsave(&context_info->lock, flags);
		context_info->is_waiting_last_req = false;
		spin_unlock_irqrestore(&context_info->lock, flags);
		if (context_info->is_done_rcv) {
			context_info->is_done_rcv = false;
			context_info->is_new_req = false;
			cmd = mrq->cmd;

			if (!cmd->error || !cmd->retries ||
			    mmc_card_removed(host->card)) {
				err = host->areq->err_check(host->card,
							    host->areq);
				break; /* return err */
			} else {
				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
					mmc_hostname(host),
					cmd->opcode, cmd->error);
				cmd->retries--;
				cmd->error = 0;
				host->ops->request(host, mrq);
				continue; /* wait for done/new event again */
			}
		} else if (context_info->is_new_req) {
			context_info->is_new_req = false;
			if (!next_req) {
				err = MMC_BLK_NEW_REQUEST;
				break; /* return err */
			}
		}
	}
	return err;
}

static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;

		/*
		 * If host has timed out waiting for the sanitize
		 * to complete, card might be still in programming state
		 * so let's try to bring the card out of programming
		 * state.
		 */
		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
			if (!mmc_interrupt_hpi(host->card)) {
				pr_warn("%s: %s: Interrupted sanitize\n",
					mmc_hostname(host), __func__);
				cmd->error = 0;
				break;
			} else {
				pr_err("%s: %s: Failed to interrupt sanitize\n",
				       mmc_hostname(host), __func__);
			}
		}
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	}
}

/**
 *	mmc_pre_req - Prepare for a new request
 *	@host: MMC host to prepare command
 *	@mrq: MMC request to prepare for
 *	@is_first_req: true if there is no previous started request
 *                     that may run in parallel to this call, otherwise false
 *
 *	mmc_pre_req() is called prior to mmc_start_req() to let
 *	host prepare for the new request. Preparation of a request may be
 *	performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
		 bool is_first_req)
{
	if (host->ops->pre_req) {
		mmc_host_clk_hold(host);
		host->ops->pre_req(host, mrq, is_first_req);
		mmc_host_clk_release(host);
	}
}

/**
 *	mmc_post_req - Post process a completed request
 *	@host: MMC host to post process command
 *	@mrq: MMC request to post process for
 *	@err: Error, if non zero, clean up any resources made in pre_req
 *
 *	Let the host post process a completed request. Post processing of
 *	a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req) {
		mmc_host_clk_hold(host);
		host->ops->post_req(host, mrq, err);
		mmc_host_clk_release(host);
	}
}

/**
 *	mmc_start_req - start a non-blocking request
 *	@host: MMC host to start command
 *	@areq: async request to start
 *	@error: out parameter returns 0 for success, otherwise non zero
 *
 *	Start a new MMC custom command request for a host.
 *	If there is an ongoing async request, wait for completion
 *	of that request, start the new one, and return.
 *	Does not wait for the new request to complete.
 *
 *	Returns the completed request, NULL in case of none completed.
 *	Wait for an ongoing request (previously started) to complete and
 *	return the completed request. If there is no ongoing request, NULL
 *	is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
		if (err == MMC_BLK_NEW_REQUEST) {
			if (error)
				*error = err;
			/*
			 * The previous request was not completed,
			 * nothing to return
			 */
			return NULL;
		}
		/*
		 * Check BKOPS urgency for each R1 response
		 */
		if (host->card && mmc_card_mmc(host->card) &&
		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {

			/* Cancel the prepared request */
			if (areq)
				mmc_post_req(host, areq->mrq, -EINVAL);

			mmc_start_bkops(host->card, true);

			/* Prepare the request again */
			if (areq)
				mmc_pre_req(host, areq->mrq, !host->areq);
		}
	}

	if (!err && areq)
		start_err = __mmc_start_data_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((err || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (err)
		host->areq = NULL;
	else
		host->areq = areq;

	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);
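
/*
 * Illustrative pipelining sketch (hypothetical helper names):
 * mmc_start_req() lets the block driver hand in the next prepared
 * async request while collecting the one that just finished:
 *
 *	struct mmc_async_req *done_areq;
 *	int status;
 *
 *	done_areq = mmc_start_req(host, next_areq, &status);
 *	if (done_areq)
 *		finish_blk_request(done_areq, status);
 *
 * Passing a NULL areq simply waits for the outstanding request and
 * returns it, which is how the pipeline is drained.
 */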

/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. Does not attempt to parse the
 *	response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
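
/*
 * Minimal synchronous transfer sketch (illustrative only; assumes the
 * caller has claimed the host and that host, card, buf and blk_addr
 * exist): a single-block read built from cmd + data and run to
 * completion:
 *
 *	struct mmc_request mrq = {NULL};
 *	struct mmc_command cmd = {0};
 *	struct mmc_data data = {0};
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, 512);
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = blk_addr;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mmc_set_data_timeout(&data, card);
 *	mmc_wait_for_req(host, &mrq);
 *
 * then check cmd.error and data.error afterwards.
 */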

/**
 *	mmc_interrupt_hpi - issue a High Priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt, then poll the card status
 *	until it leaves the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	BUG_ON(!card);

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);

/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete.  Return any error that occurred while the command
 *	was executing.  Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
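
/*
 * Illustrative sketch: issuing GO_IDLE_STATE with a few retries. The
 * claim/release bracket is required since mmc_wait_for_cmd() asserts
 * host->claimed. (Flags shown are for the non-SPI case.)
 *
 *	struct mmc_command cmd = {0};
 *	int err;
 *
 *	cmd.opcode = MMC_GO_IDLE_STATE;
 *	cmd.arg = 0;
 *	cmd.flags = MMC_RSP_NONE | MMC_CMD_BC;
 *
 *	mmc_claim_host(host);
 *	err = mmc_wait_for_cmd(host, &cmd, 3);
 *	mmc_release_host(host);
 */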

/**
 *	mmc_stop_bkops - stop ongoing BKOPS
 *	@card: MMC card to check BKOPS
 *
 *	Send HPI command to stop ongoing background operations to
 *	allow rapid servicing of foreground operations, e.g. read/
 *	writes. Wait until the card comes out of the programming state
 *	to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	BUG_ON(!card);
	err = mmc_interrupt_hpi(card);

	/*
	 * If err is -EINVAL, an HPI cannot be issued in the card's
	 * current state, so let the card complete the BKOPS on its own
	 * and just clear the flag.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);

int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	mmc_claim_host(card->host);
	err = mmc_get_ext_csd(card, &ext_csd);
	mmc_release_host(card->host);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}
EXPORT_SYMBOL(mmc_read_bkops_status);

/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says: "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}

		/* assign limit value if invalid */
		if (timeout_us == 0)
			data->timeout_ns = limit_us * 1000;
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 600ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 600000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
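
/*
 * Worked example with made-up CSD values: an SD write with
 * tacc_ns = 1500000 (1.5 ms) and r2w_factor = 5 gives
 * mult = 100 << 5 = 3200, so timeout_ns works out to 4.8 s. That
 * exceeds the 3 s write limit above, so the timeout is clamped to
 * 3 s; an SDHC (block-addressed) card would get the fixed limit
 * regardless of the computed value.
 */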

/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 *
 *	Note that this function is only relevant when issuing a
 *	single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);

/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non-NULL and
 *	dereferences to a non-zero value, this will return prematurely with
 *	that non-zero value without acquiring the lock.  Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;
	bool pm = false;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		if (host->claim_cnt == 1)
			pm = true;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);

	if (pm)
		pm_runtime_get_sync(mmc_dev(host));

	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);

/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release an MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
		pm_runtime_mark_last_busy(mmc_dev(host));
		pm_runtime_put_autosuspend(mmc_dev(host));
	}
}
EXPORT_SYMBOL(mmc_release_host);

/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card)
{
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
}
EXPORT_SYMBOL(mmc_get_card);

/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card)
{
	mmc_release_host(card->host);
	pm_runtime_mark_last_busy(&card->dev);
	pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);
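
/*
 * Illustrative pattern (mmc_do_something() is hypothetical): paths that
 * touch the card from process context bracket their work with these
 * helpers, which both resume the card via runtime PM and serialize
 * against other users of the host:
 *
 *	mmc_get_card(card);
 *	err = mmc_do_something(card);
 *	mmc_put_card(card);
 */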

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	mmc_host_clk_hold(host);
	host->ios.chip_select = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz && hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	mmc_host_clk_hold(host);
	__mmc_set_clock(host, hz);
	mmc_host_clk_release(host);
}

#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here! The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency. When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		BUG_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		__mmc_set_clock(host, host->clk_old);
	}
}

void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;

	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif

int mmc_execute_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 opcode;
	int err;

	if (!host->ops->execute_tuning)
		return 0;

	if (mmc_card_mmc(card))
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
	else
		opcode = MMC_SEND_TUNING_BLOCK;

	mmc_host_clk_hold(host);
	err = host->ops->execute_tuning(host, opcode);
	mmc_host_clk_release(host);

	if (err)
		pr_err("%s: tuning execution failed\n", mmc_hostname(host));

	return err;
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	mmc_host_clk_hold(host);
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_host_clk_hold(host);
	host->ios.bus_width = width;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Set initial state after a power cycle or a hw_reset.
 */
void mmc_set_initial_state(struct mmc_host *host)
{
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;

	mmc_set_ios(host);
}
/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
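
/*
 * Worked example: mmc_vddrange_to_ocrmask(3300, 3400) resolves vdd_min
 * with low bits preferred (3300 -> ilog2(MMC_VDD_32_33), bit 20) and
 * vdd_max with high bits preferred (3400 -> ilog2(MMC_VDD_34_35),
 * bit 22), so the returned mask is MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35, matching the boundary note above.
 */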

#ifdef CONFIG_OF

/**
 * mmc_of_parse_voltage - return mask of supported voltages
 * @np: The device node to be parsed.
 * @mask: mask of voltages available for MMC/SD/SDIO
 *
 * Returns zero on success, or a negative errno if the "voltage-ranges"
 * property is missing or invalid.
 */
int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
{
	const u32 *voltage_ranges;
	int num_ranges, i;

	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
	if (!voltage_ranges || !num_ranges) {
		pr_info("%s: voltage-ranges unspecified\n", np->full_name);
		return -EINVAL;
	}

	for (i = 0; i < num_ranges; i++) {
		const int j = i * 2;
		u32 ocr_mask;

		ocr_mask = mmc_vddrange_to_ocrmask(
				be32_to_cpu(voltage_ranges[j]),
				be32_to_cpu(voltage_ranges[j + 1]));
		if (!ocr_mask) {
			pr_err("%s: voltage-range #%d is invalid\n",
				np->full_name, i);
			return -EINVAL;
		}
		*mask |= ocr_mask;
	}

	return 0;
}
EXPORT_SYMBOL(mmc_of_parse_voltage);
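
/*
 * Illustrative device tree snippet consumed by this parser (node name
 * is hypothetical; values are millivolt pairs, and this one yields the
 * mask from the mmc_vddrange_to_ocrmask() worked example above):
 *
 *	mmc0: mmc@0 {
 *		voltage-ranges = <3300 3400>;
 *	};
 */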

#endif /* CONFIG_OF */

static int mmc_of_get_func_num(struct device_node *node)
{
	u32 reg;
	int ret;

	ret = of_property_read_u32(node, "reg", &reg);
	if (ret < 0)
		return ret;

	return reg;
}

struct device_node *mmc_of_find_child_device(struct mmc_host *host,
		unsigned func_num)
{
	struct device_node *node;

	if (!host->parent || !host->parent->of_node)
		return NULL;

	for_each_child_of_node(host->parent->of_node, node) {
		if (mmc_of_get_func_num(node) == func_num)
			return node;
	}

	return NULL;
}

#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int			result = 0;
	int			count;
	int			i;
	int			vdd_uV;
	int			vdd_mV;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	if (!result) {
		vdd_uV = regulator_get_voltage(supply);
		if (vdd_uV <= 0)
			return vdd_uV;

		vdd_mV = vdd_uV / 1000;
		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int			result = 0;
	int			min_uV, max_uV;

	if (vdd_bit) {
		int		tmp;

		/*
		 * REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		result = regulator_set_voltage(supply, min_uV, max_uV);
		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
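
/*
 * Illustrative set_ios() fragment (foo_set_ios is hypothetical; the
 * vmmc handle comes from mmc_regulator_get_supply() below): host
 * drivers typically forward the requested vdd bit here, passing 0 to
 * power the supply off:
 *
 *	static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		if (!IS_ERR(mmc->supply.vmmc))
 *			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
 *					      ios->power_mode == MMC_POWER_OFF ?
 *					      0 : ios->vdd);
 *	}
 */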

#endif /* CONFIG_REGULATOR */

int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	int ret;

	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");

	if (IS_ERR(mmc->supply.vmmc)) {
		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(dev, "No vmmc regulator found\n");
	} else {
		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
		if (ret > 0)
			mmc->ocr_avail = ret;
		else
			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
	}

	if (IS_ERR(mmc->supply.vqmmc)) {
		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(dev, "No vqmmc regulator found\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		dev_warn(mmc_dev(host),
		"card claims to support voltages below defined range\n");
		ocr &= ~0x7F;
	}

	ocr &= host->ocr_avail;
	if (!ocr) {
		dev_warn(mmc_dev(host), "no support for card's volts\n");
		return 0;
	}

	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
		bit = ffs(ocr) - 1;
		ocr &= 3 << bit;
		mmc_power_cycle(host, ocr);
	} else {
		bit = fls(ocr) - 1;
		ocr &= 3 << bit;
		if (bit != host->ios.vdd)
			dev_warn(mmc_dev(host), "exceeding card's volts\n");
	}

	return ocr;
}

int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	int err = 0;
	int old_signal_voltage = host->ios.signal_voltage;

	host->ios.signal_voltage = signal_voltage;
	if (host->ops->start_signal_voltage_switch) {
		mmc_host_clk_hold(host);
		err = host->ops->start_signal_voltage_switch(host, &host->ios);
		mmc_host_clk_release(host);
	}

	if (err)
		host->ios.signal_voltage = old_signal_voltage;

	return err;
}

int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
{
	struct mmc_command cmd = {0};
	int err = 0;
	u32 clock;

	BUG_ON(!host);

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return __mmc_set_signal_voltage(host, signal_voltage);

	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	if (!host->ops->card_busy)
		pr_warn("%s: cannot verify signal voltage switch\n",
			mmc_hostname(host));

	mmc_host_clk_hold(host);

	cmd.opcode = SD_SWITCH_VOLTAGE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		goto err_command;

	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
		err = -EIO;
		goto err_command;
	}
	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
	}
	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	clock = host->ios.clock;
	host->ios.clock = 0;
	mmc_set_ios(host);

	if (__mmc_set_signal_voltage(host, signal_voltage)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		goto power_cycle;
	}

	/* Keep clock gated for at least 5 ms */
	mmc_delay(5);
	host->ios.clock = clock;
	mmc_set_ios(host);

	/* Wait for at least 1 ms according to spec */
	mmc_delay(1);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
		err = -EAGAIN;

power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, "
			"power cycling card\n", mmc_hostname(host));
		mmc_power_cycle(host, ocr);
	}

err_command:
	mmc_host_clk_release(host);

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	mmc_host_clk_hold(host);
	host->ios.timing = timing;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	mmc_host_clk_hold(host);
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_host_clk_hold(host);

	mmc_pwrseq_pre_power_on(host);

	host->ios.vdd = fls(ocr) - 1;
	host->ios.power_mode = MMC_POWER_UP;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
	if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	mmc_pwrseq_post_power_on(host);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);

	mmc_host_clk_release(host);
}

void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_host_clk_hold(host);

	mmc_pwrseq_power_off(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	host->ios.power_mode = MMC_POWER_OFF;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);

	mmc_host_clk_release(host);
}

void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
	mmc_power_off(host);
	/* Wait at least 1 ms according to SD spec */
	mmc_delay(1);
	mmc_power_up(host, ocr);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign an mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}

static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
				bool cd_irq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	/*
	 * If the device is configured as wakeup, we prevent a new sleep for
	 * 5 s to give provision for user space to consume the event.
	 */
	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
		device_can_wakeup(mmc_dev(host)))
		pm_wakeup_event(mmc_dev(host), 5000);

	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
	_mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);
1817 
mmc_init_erase(struct mmc_card * card)1818 void mmc_init_erase(struct mmc_card *card)
1819 {
1820 	unsigned int sz;
1821 
1822 	if (is_power_of_2(card->erase_size))
1823 		card->erase_shift = ffs(card->erase_size) - 1;
1824 	else
1825 		card->erase_shift = 0;
1826 
1827 	/*
1828 	 * It is possible to erase an arbitrarily large area of an SD or MMC
1829 	 * card.  That is not desirable because it can take a long time
1830 	 * (minutes) potentially delaying more important I/O, and also the
1831 	 * timeout calculations become increasingly hugely over-estimated.
1832 	 * Consequently, 'pref_erase' is defined as a guide to limit erases
1833 	 * to that size and alignment.
1834 	 *
1835 	 * For SD cards that define Allocation Unit size, limit erases to one
1836 	 * Allocation Unit at a time.  For MMC cards that define High Capacity
1837 	 * Erase Size, whether it is switched on or not, limit to that size.
1838 	 * Otherwise just have a stab at a good value.  For modern cards it
1839 	 * will end up being 4MiB.  Note that if the value is too small, it
1840 	 * can end up taking longer to erase.
1841 	 */
1842 	if (mmc_card_sd(card) && card->ssr.au) {
1843 		card->pref_erase = card->ssr.au;
1844 		card->erase_shift = ffs(card->ssr.au) - 1;
1845 	} else if (card->ext_csd.hc_erase_size) {
1846 		card->pref_erase = card->ext_csd.hc_erase_size;
1847 	} else if (card->erase_size) {
1848 		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1849 		if (sz < 128)
1850 			card->pref_erase = 512 * 1024 / 512;
1851 		else if (sz < 512)
1852 			card->pref_erase = 1024 * 1024 / 512;
1853 		else if (sz < 1024)
1854 			card->pref_erase = 2 * 1024 * 1024 / 512;
1855 		else
1856 			card->pref_erase = 4 * 1024 * 1024 / 512;
1857 		if (card->pref_erase < card->erase_size)
1858 			card->pref_erase = card->erase_size;
1859 		else {
1860 			sz = card->pref_erase % card->erase_size;
1861 			if (sz)
1862 				card->pref_erase += card->erase_size - sz;
1863 		}
1864 	} else
1865 		card->pref_erase = 0;
1866 }
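
/*
 * Worked example with illustrative numbers: for a hypothetical 8 GiB MMC
 * card with no hc_erase_size, sz evaluates to 8192 (the capacity in MiB),
 * so pref_erase starts at 4 MiB = 8192 sectors.  It is then rounded up to
 * a multiple of erase_size; e.g. with erase_size = 3072 sectors:
 *
 *	8192 % 3072 = 2048  =>  pref_erase = 8192 + (3072 - 2048) = 9216
 */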
1867 
1868 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1869 				          unsigned int arg, unsigned int qty)
1870 {
1871 	unsigned int erase_timeout;
1872 
1873 	if (arg == MMC_DISCARD_ARG ||
1874 	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1875 		erase_timeout = card->ext_csd.trim_timeout;
1876 	} else if (card->ext_csd.erase_group_def & 1) {
1877 		/* High Capacity Erase Group Size uses HC timeouts */
1878 		if (arg == MMC_TRIM_ARG)
1879 			erase_timeout = card->ext_csd.trim_timeout;
1880 		else
1881 			erase_timeout = card->ext_csd.hc_erase_timeout;
1882 	} else {
1883 		/* CSD Erase Group Size uses write timeout */
1884 		unsigned int mult = (10 << card->csd.r2w_factor);
1885 		unsigned int timeout_clks = card->csd.tacc_clks * mult;
1886 		unsigned int timeout_us;
1887 
1888 		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
1889 		if (card->csd.tacc_ns < 1000000)
1890 			timeout_us = (card->csd.tacc_ns * mult) / 1000;
1891 		else
1892 			timeout_us = (card->csd.tacc_ns / 1000) * mult;
1893 
1894 		/*
1895 		 * ios.clock is only a target.  The real clock rate might be
1896 		 * less but not that much less, so fudge it by multiplying by 2.
1897 		 */
1898 		timeout_clks <<= 1;
1899 		timeout_us += (timeout_clks * 1000) /
1900 			      (mmc_host_clk_rate(card->host) / 1000);
1901 
1902 		erase_timeout = timeout_us / 1000;
1903 
1904 		/*
1905 		 * Theoretically, the calculation could underflow so round up
1906 		 * to 1ms in that case.
1907 		 */
1908 		if (!erase_timeout)
1909 			erase_timeout = 1;
1910 	}
1911 
1912 	/* Multiplier for secure operations */
1913 	if (arg & MMC_SECURE_ARGS) {
1914 		if (arg == MMC_SECURE_ERASE_ARG)
1915 			erase_timeout *= card->ext_csd.sec_erase_mult;
1916 		else
1917 			erase_timeout *= card->ext_csd.sec_trim_mult;
1918 	}
1919 
1920 	erase_timeout *= qty;
1921 
1922 	/*
1923 	 * Ensure at least a 1 second timeout for SPI as per
1924 	 * 'mmc_set_data_timeout()'
1925 	 */
1926 	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1927 		erase_timeout = 1000;
1928 
1929 	return erase_timeout;
1930 }
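
/*
 * Worked example with illustrative numbers for the CSD-based slow path:
 * r2w_factor = 7, tacc_ns = 80000000 and tacc_clks = 0 give
 *
 *	mult       = 10 << 7 = 1280
 *	timeout_us = (80000000 / 1000) * 1280 = 102400000
 *	erase_timeout ~= 102400 ms per erase group
 *
 * before scaling by qty and any secure-operation multiplier.
 */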
1931 
1932 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1933 					 unsigned int arg,
1934 					 unsigned int qty)
1935 {
1936 	unsigned int erase_timeout;
1937 
1938 	if (card->ssr.erase_timeout) {
1939 		/* Erase timeout specified in SD Status Register (SSR) */
1940 		erase_timeout = card->ssr.erase_timeout * qty +
1941 				card->ssr.erase_offset;
1942 	} else {
1943 		/*
1944 		 * Erase timeout not specified in SD Status Register (SSR) so
1945 		 * use 250ms per write block.
1946 		 */
1947 		erase_timeout = 250 * qty;
1948 	}
1949 
1950 	/* Must not be less than 1 second */
1951 	if (erase_timeout < 1000)
1952 		erase_timeout = 1000;
1953 
1954 	return erase_timeout;
1955 }
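
/*
 * Worked example with illustrative numbers: an SD card whose SSR reports
 * erase_timeout = 250 ms and erase_offset = 50 ms yields, for qty = 4
 * allocation units:
 *
 *	erase_timeout = 250 * 4 + 50 = 1050 ms
 */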
1956 
1957 static unsigned int mmc_erase_timeout(struct mmc_card *card,
1958 				      unsigned int arg,
1959 				      unsigned int qty)
1960 {
1961 	if (mmc_card_sd(card))
1962 		return mmc_sd_erase_timeout(card, arg, qty);
1963 	else
1964 		return mmc_mmc_erase_timeout(card, arg, qty);
1965 }
1966 
1967 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1968 			unsigned int to, unsigned int arg)
1969 {
1970 	struct mmc_command cmd = {0};
1971 	unsigned int qty = 0;
1972 	unsigned long timeout;
1973 	int err;
1974 
1975 	/*
1976 	 * qty is used to calculate the erase timeout which depends on how many
1977 	 * erase groups (or allocation units in SD terminology) are affected.
1978 	 * We count erasing part of an erase group as one erase group.
1979 	 * For SD, the allocation units are always a power of 2.  For MMC, the
1980 	 * erase group size is almost certainly also a power of 2, but the
1981 	 * JEDEC standard does not seem to insist on that, so we fall back to
1982 	 * division in that case.  SD may not specify an allocation unit size,
1983 	 * in which case the timeout is based on the number of write blocks.
1984 	 *
1985 	 * Note that the timeout for secure trim 2 will only be correct if the
1986 	 * number of erase groups specified is the same as the total of all
1987 	 * preceding secure trim 1 commands.  Since the power may have been
1988 	 * lost since the secure trim 1 commands occurred, it is generally
1989 	 * impossible to calculate the secure trim 2 timeout correctly.
1990 	 */
1991 	if (card->erase_shift)
1992 		qty += ((to >> card->erase_shift) -
1993 			(from >> card->erase_shift)) + 1;
1994 	else if (mmc_card_sd(card))
1995 		qty += to - from + 1;
1996 	else
1997 		qty += ((to / card->erase_size) -
1998 			(from / card->erase_size)) + 1;
1999 
2000 	if (!mmc_card_blockaddr(card)) {
2001 		from <<= 9;
2002 		to <<= 9;
2003 	}
2004 
2005 	if (mmc_card_sd(card))
2006 		cmd.opcode = SD_ERASE_WR_BLK_START;
2007 	else
2008 		cmd.opcode = MMC_ERASE_GROUP_START;
2009 	cmd.arg = from;
2010 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2011 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2012 	if (err) {
2013 		pr_err("mmc_erase: group start error %d, status %#x\n",
2014 		       err, cmd.resp[0]);
2015 		err = -EIO;
2016 		goto out;
2017 	}
2018 
2019 	memset(&cmd, 0, sizeof(struct mmc_command));
2020 	if (mmc_card_sd(card))
2021 		cmd.opcode = SD_ERASE_WR_BLK_END;
2022 	else
2023 		cmd.opcode = MMC_ERASE_GROUP_END;
2024 	cmd.arg = to;
2025 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2026 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2027 	if (err) {
2028 		pr_err("mmc_erase: group end error %d, status %#x\n",
2029 		       err, cmd.resp[0]);
2030 		err = -EIO;
2031 		goto out;
2032 	}
2033 
2034 	memset(&cmd, 0, sizeof(struct mmc_command));
2035 	cmd.opcode = MMC_ERASE;
2036 	cmd.arg = arg;
2037 	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2038 	cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
2039 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2040 	if (err) {
2041 		pr_err("mmc_erase: erase error %d, status %#x\n",
2042 		       err, cmd.resp[0]);
2043 		err = -EIO;
2044 		goto out;
2045 	}
2046 
2047 	if (mmc_host_is_spi(card->host))
2048 		goto out;
2049 
2050 	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
2051 	do {
2052 		memset(&cmd, 0, sizeof(struct mmc_command));
2053 		cmd.opcode = MMC_SEND_STATUS;
2054 		cmd.arg = card->rca << 16;
2055 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2056 		/* Do not retry; otherwise we can't see errors */
2057 		err = mmc_wait_for_cmd(card->host, &cmd, 0);
2058 		if (err || (cmd.resp[0] & 0xFDF92000)) {
2059 			pr_err("error %d requesting status %#x\n",
2060 				err, cmd.resp[0]);
2061 			err = -EIO;
2062 			goto out;
2063 		}
2064 
2065 		/* Timeout if the device never becomes ready for data and
2066 		 * never leaves the program state.
2067 		 */
2068 		if (time_after(jiffies, timeout)) {
2069 			pr_err("%s: Card stuck in programming state! %s\n",
2070 				mmc_hostname(card->host), __func__);
2071 			err = -EIO;
2072 			goto out;
2073 		}
2074 
2075 	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
2076 		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
2077 out:
2078 	return err;
2079 }
2080 
2081 /**
2082  * mmc_erase - erase sectors.
2083  * @card: card to erase
2084  * @from: first sector to erase
2085  * @nr: number of sectors to erase
2086  * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
2087  *
2088  * Caller must claim host before calling this function.
2089  */
2090 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2091 	      unsigned int arg)
2092 {
2093 	unsigned int rem, to = from + nr;
2094 
2095 	if (!(card->host->caps & MMC_CAP_ERASE) ||
2096 	    !(card->csd.cmdclass & CCC_ERASE))
2097 		return -EOPNOTSUPP;
2098 
2099 	if (!card->erase_size)
2100 		return -EOPNOTSUPP;
2101 
2102 	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
2103 		return -EOPNOTSUPP;
2104 
2105 	if ((arg & MMC_SECURE_ARGS) &&
2106 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2107 		return -EOPNOTSUPP;
2108 
2109 	if ((arg & MMC_TRIM_ARGS) &&
2110 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2111 		return -EOPNOTSUPP;
2112 
2113 	if (arg == MMC_SECURE_ERASE_ARG) {
2114 		if (from % card->erase_size || nr % card->erase_size)
2115 			return -EINVAL;
2116 	}
2117 
2118 	if (arg == MMC_ERASE_ARG) {
2119 		rem = from % card->erase_size;
2120 		if (rem) {
2121 			rem = card->erase_size - rem;
2122 			from += rem;
2123 			if (nr > rem)
2124 				nr -= rem;
2125 			else
2126 				return 0;
2127 		}
2128 		rem = nr % card->erase_size;
2129 		if (rem)
2130 			nr -= rem;
2131 	}
2132 
2133 	if (nr == 0)
2134 		return 0;
2135 
2136 	to = from + nr;
2137 
2138 	if (to <= from)
2139 		return -EINVAL;
2140 
2141 	/* 'from' and 'to' are inclusive */
2142 	to -= 1;
2143 
2144 	return mmc_do_erase(card, from, to, arg);
2145 }
2146 EXPORT_SYMBOL(mmc_erase);
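
/*
 * Illustrative sketch, not part of the original file: a caller such as a
 * block driver claims the host, checks support and alignment, and only
 * then issues the erase. example_erase() is a hypothetical wrapper.
 */
#if 0
static int example_erase(struct mmc_card *card, unsigned int from,
			 unsigned int nr)
{
	int err;

	if (!mmc_can_erase(card))
		return -EOPNOTSUPP;
	if (!mmc_erase_group_aligned(card, from, nr))
		return -EINVAL;	/* or round to erase-group boundaries */

	mmc_claim_host(card->host);
	err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
	mmc_release_host(card->host);
	return err;
}
#endif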
2147 
2148 int mmc_can_erase(struct mmc_card *card)
2149 {
2150 	if ((card->host->caps & MMC_CAP_ERASE) &&
2151 	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2152 		return 1;
2153 	return 0;
2154 }
2155 EXPORT_SYMBOL(mmc_can_erase);
2156 
2157 int mmc_can_trim(struct mmc_card *card)
2158 {
2159 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
2160 		return 1;
2161 	return 0;
2162 }
2163 EXPORT_SYMBOL(mmc_can_trim);
2164 
2165 int mmc_can_discard(struct mmc_card *card)
2166 {
2167 	/*
2168 	 * As there's no way to detect the discard support bit at v4.5,
2169 	 * use the s/w feature support field.
2170 	 */
2171 	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2172 		return 1;
2173 	return 0;
2174 }
2175 EXPORT_SYMBOL(mmc_can_discard);
2176 
2177 int mmc_can_sanitize(struct mmc_card *card)
2178 {
2179 	if (!mmc_can_trim(card) && !mmc_can_erase(card))
2180 		return 0;
2181 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2182 		return 1;
2183 	return 0;
2184 }
2185 EXPORT_SYMBOL(mmc_can_sanitize);
2186 
2187 int mmc_can_secure_erase_trim(struct mmc_card *card)
2188 {
2189 	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2190 	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2191 		return 1;
2192 	return 0;
2193 }
2194 EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2195 
2196 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2197 			    unsigned int nr)
2198 {
2199 	if (!card->erase_size)
2200 		return 0;
2201 	if (from % card->erase_size || nr % card->erase_size)
2202 		return 0;
2203 	return 1;
2204 }
2205 EXPORT_SYMBOL(mmc_erase_group_aligned);
2206 
2207 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2208 					    unsigned int arg)
2209 {
2210 	struct mmc_host *host = card->host;
2211 	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
2212 	unsigned int last_timeout = 0;
2213 
2214 	if (card->erase_shift)
2215 		max_qty = UINT_MAX >> card->erase_shift;
2216 	else if (mmc_card_sd(card))
2217 		max_qty = UINT_MAX;
2218 	else
2219 		max_qty = UINT_MAX / card->erase_size;
2220 
2221 	/* Find the largest qty with an OK timeout */
2222 	do {
2223 		y = 0;
2224 		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2225 			timeout = mmc_erase_timeout(card, arg, qty + x);
2226 			if (timeout > host->max_busy_timeout)
2227 				break;
2228 			if (timeout < last_timeout)
2229 				break;
2230 			last_timeout = timeout;
2231 			y = x;
2232 		}
2233 		qty += y;
2234 	} while (y);
2235 
2236 	if (!qty)
2237 		return 0;
2238 
2239 	if (qty == 1)
2240 		return 1;
2241 
2242 	/* Convert qty to sectors */
2243 	if (card->erase_shift)
2244 		max_discard = --qty << card->erase_shift;
2245 	else if (mmc_card_sd(card))
2246 		max_discard = qty;
2247 	else
2248 		max_discard = --qty * card->erase_size;
2249 
2250 	return max_discard;
2251 }
2252 
2253 unsigned int mmc_calc_max_discard(struct mmc_card *card)
2254 {
2255 	struct mmc_host *host = card->host;
2256 	unsigned int max_discard, max_trim;
2257 
2258 	if (!host->max_busy_timeout)
2259 		return UINT_MAX;
2260 
2261 	/*
2262 	 * Without erase_group_def set, MMC erase timeout depends on clock
2263 	 * frequency, which can change.  In that case, the best choice is
2264 	 * just the preferred erase size.
2265 	 */
2266 	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2267 		return card->pref_erase;
2268 
2269 	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2270 	if (mmc_can_trim(card)) {
2271 		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2272 		if (max_trim < max_discard)
2273 			max_discard = max_trim;
2274 	} else if (max_discard < card->erase_size) {
2275 		max_discard = 0;
2276 	}
2277 	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2278 		 mmc_hostname(host), max_discard, host->max_busy_timeout);
2279 	return max_discard;
2280 }
2281 EXPORT_SYMBOL(mmc_calc_max_discard);
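
/*
 * Illustrative sketch, an assumption rather than code from this file:
 * block layer glue typically feeds mmc_calc_max_discard() into the
 * request queue's discard limit so individual discard requests stay
 * within the host's busy timeout.  Requires <linux/blkdev.h>;
 * example_setup_discard() is a hypothetical helper.
 */
#if 0
static void example_setup_discard(struct request_queue *q,
				  struct mmc_card *card)
{
	unsigned int max_discard = mmc_calc_max_discard(card);

	if (max_discard)
		blk_queue_max_discard_sectors(q, max_discard);
}
#endif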
2282 
2283 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2284 {
2285 	struct mmc_command cmd = {0};
2286 
2287 	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
2288 		return 0;
2289 
2290 	cmd.opcode = MMC_SET_BLOCKLEN;
2291 	cmd.arg = blocklen;
2292 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2293 	return mmc_wait_for_cmd(card->host, &cmd, 5);
2294 }
2295 EXPORT_SYMBOL(mmc_set_blocklen);
2296 
2297 int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2298 			bool is_rel_write)
2299 {
2300 	struct mmc_command cmd = {0};
2301 
2302 	cmd.opcode = MMC_SET_BLOCK_COUNT;
2303 	cmd.arg = blockcount & 0x0000FFFF;
2304 	if (is_rel_write)
2305 		cmd.arg |= 1 << 31;
2306 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2307 	return mmc_wait_for_cmd(card->host, &cmd, 5);
2308 }
2309 EXPORT_SYMBOL(mmc_set_blockcount);
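
/*
 * Illustrative sketch, not part of the original file: a data-path user
 * would typically pair mmc_set_blocklen() with mmc_set_blockcount()
 * before a predefined multi-block transfer (CMD23).
 * example_prepare_rel_write() is a hypothetical helper.
 */
#if 0
static int example_prepare_rel_write(struct mmc_card *card,
				     unsigned int nr_blocks)
{
	int err;

	err = mmc_set_blocklen(card, 512);
	if (err)
		return err;
	/* Request a reliable write by setting bit 31 of the CMD23 argument */
	return mmc_set_blockcount(card, nr_blocks, true);
}
#endif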
2310 
2311 static void mmc_hw_reset_for_init(struct mmc_host *host)
2312 {
2313 	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2314 		return;
2315 	mmc_host_clk_hold(host);
2316 	host->ops->hw_reset(host);
2317 	mmc_host_clk_release(host);
2318 }
2319 
2320 int mmc_hw_reset(struct mmc_host *host)
2321 {
2322 	int ret;
2323 
2324 	if (!host->card)
2325 		return -EINVAL;
2326 
2327 	mmc_bus_get(host);
2328 	if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
2329 		mmc_bus_put(host);
2330 		return -EOPNOTSUPP;
2331 	}
2332 
2333 	ret = host->bus_ops->reset(host);
2334 	mmc_bus_put(host);
2335 
2336 	pr_warn("%s: tried to reset card\n", mmc_hostname(host));
2337 
2338 	return ret;
2339 }
2340 EXPORT_SYMBOL(mmc_hw_reset);
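
/*
 * Illustrative sketch, an assumption rather than code from this file: a
 * driver that finds the card wedged might attempt a full
 * re-initialization via mmc_hw_reset() before failing the request.
 * example_recover() is a hypothetical helper.
 */
#if 0
static int example_recover(struct mmc_host *host)
{
	int err = mmc_hw_reset(host);

	if (err == -EOPNOTSUPP)
		pr_warn("%s: hw reset not supported\n", mmc_hostname(host));
	return err;
}
#endif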
2341 
2342 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2343 {
2344 	host->f_init = freq;
2345 
2346 #ifdef CONFIG_MMC_DEBUG
2347 	pr_info("%s: %s: trying to init card at %u Hz\n",
2348 		mmc_hostname(host), __func__, host->f_init);
2349 #endif
2350 	mmc_power_up(host, host->ocr_avail);
2351 
2352 	/*
2353 	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2354 	 * do a hardware reset if possible.
2355 	 */
2356 	mmc_hw_reset_for_init(host);
2357 
2358 	/*
2359 	 * sdio_reset sends CMD52 to reset the card.  Since we do not know
2360 	 * if the card is being re-initialized, just send it.  CMD52
2361 	 * should be ignored by SD/eMMC cards.
2362 	 */
2363 	sdio_reset(host);
2364 	mmc_go_idle(host);
2365 
2366 	mmc_send_if_cond(host, host->ocr_avail);
2367 
2368 	/* Order's important: probe SDIO, then SD, then MMC */
2369 	if (!mmc_attach_sdio(host))
2370 		return 0;
2371 	if (!mmc_attach_sd(host))
2372 		return 0;
2373 	if (!mmc_attach_mmc(host))
2374 		return 0;
2375 
2376 	mmc_power_off(host);
2377 	return -EIO;
2378 }
2379 
2380 int _mmc_detect_card_removed(struct mmc_host *host)
2381 {
2382 	int ret;
2383 
2384 	if (host->caps & MMC_CAP_NONREMOVABLE)
2385 		return 0;
2386 
2387 	if (!host->card || mmc_card_removed(host->card))
2388 		return 1;
2389 
2390 	ret = host->bus_ops->alive(host);
2391 
2392 	/*
2393 	 * Card detect status and alive check may be out of sync if card is
2394 	 * removed slowly, when card detect switch changes while card/slot
2395 	 * pads are still contacted in hardware (refer to "SD Card Mechanical
2396 	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2397 	 * detect work 200ms later for this case.
2398 	 */
2399 	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2400 		mmc_detect_change(host, msecs_to_jiffies(200));
2401 		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2402 	}
2403 
2404 	if (ret) {
2405 		mmc_card_set_removed(host->card);
2406 		pr_debug("%s: card remove detected\n", mmc_hostname(host));
2407 	}
2408 
2409 	return ret;
2410 }
2411 
2412 int mmc_detect_card_removed(struct mmc_host *host)
2413 {
2414 	struct mmc_card *card = host->card;
2415 	int ret;
2416 
2417 	WARN_ON(!host->claimed);
2418 
2419 	if (!card)
2420 		return 1;
2421 
2422 	ret = mmc_card_removed(card);
2423 	/*
2424 	 * The card will be considered unchanged unless we have been asked to
2425 	 * detect a change or host requires polling to provide card detection.
2426 	 */
2427 	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2428 		return ret;
2429 
2430 	host->detect_change = 0;
2431 	if (!ret) {
2432 		ret = _mmc_detect_card_removed(host);
2433 		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2434 			/*
2435 			 * Schedule a detect work as soon as possible to let a
2436 			 * rescan handle the card removal.
2437 			 */
2438 			cancel_delayed_work(&host->detect);
2439 			_mmc_detect_change(host, 0, false);
2440 		}
2441 	}
2442 
2443 	return ret;
2444 }
2445 EXPORT_SYMBOL(mmc_detect_card_removed);
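
/*
 * Illustrative sketch, not part of the original file: request-path code
 * can use mmc_detect_card_removed() to fail fast instead of waiting out
 * long timeouts against a card that is already gone.  The host must be
 * claimed, matching the WARN_ON above.  example_check_card() is a
 * hypothetical helper.
 */
#if 0
static int example_check_card(struct mmc_host *host)
{
	int gone;

	mmc_claim_host(host);
	gone = mmc_detect_card_removed(host);
	mmc_release_host(host);

	return gone ? -ENOMEDIUM : 0;
}
#endif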
2446 
2447 void mmc_rescan(struct work_struct *work)
2448 {
2449 	struct mmc_host *host =
2450 		container_of(work, struct mmc_host, detect.work);
2451 	int i;
2452 
2453 	if (host->trigger_card_event && host->ops->card_event) {
2454 		host->ops->card_event(host);
2455 		host->trigger_card_event = false;
2456 	}
2457 
2458 	if (host->rescan_disable)
2459 		return;
2460 
2461 	/* If there is a non-removable card registered, only scan once */
2462 	if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
2463 		return;
2464 	host->rescan_entered = 1;
2465 
2466 	mmc_bus_get(host);
2467 
2468 	/*
2469 	 * if there is a _removable_ card registered, check whether it is
2470 	 * still present
2471 	 */
2472 	if (host->bus_ops && !host->bus_dead
2473 	    && !(host->caps & MMC_CAP_NONREMOVABLE))
2474 		host->bus_ops->detect(host);
2475 
2476 	host->detect_change = 0;
2477 
2478 	/*
2479 	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2480 	 * the card is no longer present.
2481 	 */
2482 	mmc_bus_put(host);
2483 	mmc_bus_get(host);
2484 
2485 	/* if there still is a card present, stop here */
2486 	if (host->bus_ops != NULL) {
2487 		mmc_bus_put(host);
2488 		goto out;
2489 	}
2490 
2491 	/*
2492 	 * Only we can add a new handler, so it's safe to
2493 	 * drop the bus reference here.
2494 	 */
2495 	mmc_bus_put(host);
2496 
2497 	if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
2498 			host->ops->get_cd(host) == 0) {
2499 		mmc_claim_host(host);
2500 		mmc_power_off(host);
2501 		mmc_release_host(host);
2502 		goto out;
2503 	}
2504 
2505 	mmc_claim_host(host);
2506 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2507 		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2508 			break;
2509 		if (freqs[i] <= host->f_min)
2510 			break;
2511 	}
2512 	mmc_release_host(host);
2513 
2514  out:
2515 	if (host->caps & MMC_CAP_NEEDS_POLL)
2516 		mmc_schedule_delayed_work(&host->detect, HZ);
2517 }
2518 
2519 void mmc_start_host(struct mmc_host *host)
2520 {
2521 	host->f_init = max(freqs[0], host->f_min);
2522 	host->rescan_disable = 0;
2523 	host->ios.power_mode = MMC_POWER_UNDEFINED;
2524 	if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
2525 		mmc_power_off(host);
2526 	else
2527 		mmc_power_up(host, host->ocr_avail);
2528 	mmc_gpiod_request_cd_irq(host);
2529 	_mmc_detect_change(host, 0, false);
2530 }
2531 
2532 void mmc_stop_host(struct mmc_host *host)
2533 {
2534 #ifdef CONFIG_MMC_DEBUG
2535 	unsigned long flags;
2536 	spin_lock_irqsave(&host->lock, flags);
2537 	host->removed = 1;
2538 	spin_unlock_irqrestore(&host->lock, flags);
2539 #endif
2540 	if (host->slot.cd_irq >= 0)
2541 		disable_irq(host->slot.cd_irq);
2542 
2543 	host->rescan_disable = 1;
2544 	cancel_delayed_work_sync(&host->detect);
2545 	mmc_flush_scheduled_work();
2546 
2547 	/* clear pm flags now and let card drivers set them as needed */
2548 	host->pm_flags = 0;
2549 
2550 	mmc_bus_get(host);
2551 	if (host->bus_ops && !host->bus_dead) {
2552 		/* Calling bus_ops->remove() with a claimed host can deadlock */
2553 		host->bus_ops->remove(host);
2554 		mmc_claim_host(host);
2555 		mmc_detach_bus(host);
2556 		mmc_power_off(host);
2557 		mmc_release_host(host);
2558 		mmc_bus_put(host);
2559 		return;
2560 	}
2561 	mmc_bus_put(host);
2562 
2563 	BUG_ON(host->card);
2564 
2565 	mmc_power_off(host);
2566 }
2567 
2568 int mmc_power_save_host(struct mmc_host *host)
2569 {
2570 	int ret = 0;
2571 
2572 #ifdef CONFIG_MMC_DEBUG
2573 	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2574 #endif
2575 
2576 	mmc_bus_get(host);
2577 
2578 	if (!host->bus_ops || host->bus_dead) {
2579 		mmc_bus_put(host);
2580 		return -EINVAL;
2581 	}
2582 
2583 	if (host->bus_ops->power_save)
2584 		ret = host->bus_ops->power_save(host);
2585 
2586 	mmc_bus_put(host);
2587 
2588 	mmc_power_off(host);
2589 
2590 	return ret;
2591 }
2592 EXPORT_SYMBOL(mmc_power_save_host);
2593 
2594 int mmc_power_restore_host(struct mmc_host *host)
2595 {
2596 	int ret;
2597 
2598 #ifdef CONFIG_MMC_DEBUG
2599 	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2600 #endif
2601 
2602 	mmc_bus_get(host);
2603 
2604 	if (!host->bus_ops || host->bus_dead) {
2605 		mmc_bus_put(host);
2606 		return -EINVAL;
2607 	}
2608 
2609 	mmc_power_up(host, host->card->ocr);
2610 	ret = host->bus_ops->power_restore(host);
2611 
2612 	mmc_bus_put(host);
2613 
2614 	return ret;
2615 }
2616 EXPORT_SYMBOL(mmc_power_restore_host);
2617 
2618 /*
2619  * Flush the cache to the non-volatile storage.
2620  */
2621 int mmc_flush_cache(struct mmc_card *card)
2622 {
2623 	int err = 0;
2624 
2625 	if (mmc_card_mmc(card) &&
2626 			(card->ext_csd.cache_size > 0) &&
2627 			(card->ext_csd.cache_ctrl & 1)) {
2628 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2629 				EXT_CSD_FLUSH_CACHE, 1, 0);
2630 		if (err)
2631 			pr_err("%s: cache flush error %d\n",
2632 					mmc_hostname(card->host), err);
2633 	}
2634 
2635 	return err;
2636 }
2637 EXPORT_SYMBOL(mmc_flush_cache);
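
/*
 * Illustrative sketch, an assumption rather than code from this file:
 * suspend or shutdown paths call mmc_flush_cache() before powering the
 * card down so cached writes reach non-volatile storage.
 * example_suspend() is a hypothetical helper.
 */
#if 0
static int example_suspend(struct mmc_host *host)
{
	int err = mmc_flush_cache(host->card);

	if (err)
		return err;
	mmc_power_off(host);
	return 0;
}
#endif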
2638 
2639 #ifdef CONFIG_PM
2640 
2641 /* Do the card removal on suspend if the card is assumed removable.
2642  * Do that in the PM notifier while userspace isn't yet frozen, so we
2643  * will still be able to sync the card.
2644  */
2645 int mmc_pm_notify(struct notifier_block *notify_block,
2646 					unsigned long mode, void *unused)
2647 {
2648 	struct mmc_host *host = container_of(
2649 		notify_block, struct mmc_host, pm_notify);
2650 	unsigned long flags;
2651 	int err = 0;
2652 
2653 	switch (mode) {
2654 	case PM_HIBERNATION_PREPARE:
2655 	case PM_SUSPEND_PREPARE:
2656 	case PM_RESTORE_PREPARE:
2657 		spin_lock_irqsave(&host->lock, flags);
2658 		host->rescan_disable = 1;
2659 		spin_unlock_irqrestore(&host->lock, flags);
2660 		cancel_delayed_work_sync(&host->detect);
2661 
2662 		if (!host->bus_ops)
2663 			break;
2664 
2665 		/* Validate prerequisites for suspend */
2666 		if (host->bus_ops->pre_suspend)
2667 			err = host->bus_ops->pre_suspend(host);
2668 		if (!err)
2669 			break;
2670 
2671 		/* Calling bus_ops->remove() with a claimed host can deadlock */
2672 		host->bus_ops->remove(host);
2673 		mmc_claim_host(host);
2674 		mmc_detach_bus(host);
2675 		mmc_power_off(host);
2676 		mmc_release_host(host);
2677 		host->pm_flags = 0;
2678 		break;
2679 
2680 	case PM_POST_SUSPEND:
2681 	case PM_POST_HIBERNATION:
2682 	case PM_POST_RESTORE:
2683 
2684 		spin_lock_irqsave(&host->lock, flags);
2685 		host->rescan_disable = 0;
2686 		spin_unlock_irqrestore(&host->lock, flags);
2687 		_mmc_detect_change(host, 0, false);
2688 
2689 	}
2690 
2691 	return 0;
2692 }
2693 #endif
2694 
2695 /**
2696  * mmc_init_context_info() - init synchronization context
2697  * @host: mmc host
2698  *
2699  * Initialize the struct context_info needed to implement the
2700  * asynchronous request mechanism used by the mmc core, host drivers
2701  * and the mmc request supplier.
2702  */
2703 void mmc_init_context_info(struct mmc_host *host)
2704 {
2705 	spin_lock_init(&host->context_info.lock);
2706 	host->context_info.is_new_req = false;
2707 	host->context_info.is_done_rcv = false;
2708 	host->context_info.is_waiting_last_req = false;
2709 	init_waitqueue_head(&host->context_info.wait);
2710 }
2711 
2712 static int __init mmc_init(void)
2713 {
2714 	int ret;
2715 
2716 	workqueue = alloc_ordered_workqueue("kmmcd", 0);
2717 	if (!workqueue)
2718 		return -ENOMEM;
2719 
2720 	ret = mmc_register_bus();
2721 	if (ret)
2722 		goto destroy_workqueue;
2723 
2724 	ret = mmc_register_host_class();
2725 	if (ret)
2726 		goto unregister_bus;
2727 
2728 	ret = sdio_register_bus();
2729 	if (ret)
2730 		goto unregister_host_class;
2731 
2732 	return 0;
2733 
2734 unregister_host_class:
2735 	mmc_unregister_host_class();
2736 unregister_bus:
2737 	mmc_unregister_bus();
2738 destroy_workqueue:
2739 	destroy_workqueue(workqueue);
2740 
2741 	return ret;
2742 }
2743 
2744 static void __exit mmc_exit(void)
2745 {
2746 	sdio_unregister_bus();
2747 	mmc_unregister_host_class();
2748 	mmc_unregister_bus();
2749 	destroy_workqueue(workqueue);
2750 }
2751 
2752 subsys_initcall(mmc_init);
2753 module_exit(mmc_exit);
2754 
2755 MODULE_LICENSE("GPL");
2756