root/drivers/mmc/core/core.c


DEFINITIONS

This source file includes the following definitions.
  1. mmc_schedule_delayed_work
  2. mmc_should_fail_request
  3. mmc_should_fail_request
  4. mmc_complete_cmd
  5. mmc_command_done
  6. mmc_request_done
  7. __mmc_start_request
  8. mmc_mrq_pr_debug
  9. mmc_mrq_prep
  10. mmc_start_request
  11. mmc_wait_done
  12. mmc_wait_ongoing_tfr_cmd
  13. __mmc_start_req
  14. mmc_wait_for_req_done
  15. mmc_cqe_start_req
  16. mmc_cqe_request_done
  17. mmc_cqe_post_req
  18. mmc_cqe_recovery
  19. mmc_is_req_done
  20. mmc_wait_for_req
  21. mmc_wait_for_cmd
  22. mmc_set_data_timeout
  23. mmc_ctx_matches
  24. mmc_ctx_set_claimer
  25. __mmc_claim_host
  26. mmc_release_host
  27. mmc_get_card
  28. mmc_put_card
  29. mmc_set_ios
  30. mmc_set_chip_select
  31. mmc_set_clock
  32. mmc_execute_tuning
  33. mmc_set_bus_mode
  34. mmc_set_bus_width
  35. mmc_set_initial_state
  36. mmc_vdd_to_ocrbitnum
  37. mmc_vddrange_to_ocrmask
  38. mmc_of_get_func_num
  39. mmc_of_find_child_device
  40. mmc_select_voltage
  41. mmc_set_signal_voltage
  42. mmc_set_initial_signal_voltage
  43. mmc_host_set_uhs_voltage
  44. mmc_set_uhs_voltage
  45. mmc_set_timing
  46. mmc_set_driver_type
  47. mmc_select_drive_strength
  48. mmc_power_up
  49. mmc_power_off
  50. mmc_power_cycle
  51. __mmc_release_bus
  52. mmc_bus_get
  53. mmc_bus_put
  54. mmc_attach_bus
  55. mmc_detach_bus
  56. _mmc_detect_change
  57. mmc_detect_change
  58. mmc_init_erase
  59. mmc_mmc_erase_timeout
  60. mmc_sd_erase_timeout
  61. mmc_erase_timeout
  62. mmc_do_erase
  63. mmc_align_erase_size
  64. mmc_erase
  65. mmc_can_erase
  66. mmc_can_trim
  67. mmc_can_discard
  68. mmc_can_sanitize
  69. mmc_can_secure_erase_trim
  70. mmc_erase_group_aligned
  71. mmc_do_calc_max_discard
  72. mmc_calc_max_discard
  73. mmc_card_is_blockaddr
  74. mmc_set_blocklen
  75. mmc_hw_reset_for_init
  76. mmc_hw_reset
  77. mmc_sw_reset
  78. mmc_rescan_try_freq
  79. _mmc_detect_card_removed
  80. mmc_detect_card_removed
  81. mmc_rescan
  82. mmc_start_host
  83. mmc_stop_host
  84. mmc_pm_notify
  85. mmc_register_pm_notifier
  86. mmc_unregister_pm_notifier
  87. mmc_init
  88. mmc_exit

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  *  linux/drivers/mmc/core/core.c
   4  *
   5  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
   6  *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
   7  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
   8  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
   9  */
  10 #include <linux/module.h>
  11 #include <linux/init.h>
  12 #include <linux/interrupt.h>
  13 #include <linux/completion.h>
  14 #include <linux/device.h>
  15 #include <linux/delay.h>
  16 #include <linux/pagemap.h>
  17 #include <linux/err.h>
  18 #include <linux/leds.h>
  19 #include <linux/scatterlist.h>
  20 #include <linux/log2.h>
  21 #include <linux/pm_runtime.h>
  22 #include <linux/pm_wakeup.h>
  23 #include <linux/suspend.h>
  24 #include <linux/fault-inject.h>
  25 #include <linux/random.h>
  26 #include <linux/slab.h>
  27 #include <linux/of.h>
  28 
  29 #include <linux/mmc/card.h>
  30 #include <linux/mmc/host.h>
  31 #include <linux/mmc/mmc.h>
  32 #include <linux/mmc/sd.h>
  33 #include <linux/mmc/slot-gpio.h>
  34 
  35 #define CREATE_TRACE_POINTS
  36 #include <trace/events/mmc.h>
  37 
  38 #include "core.h"
  39 #include "card.h"
  40 #include "bus.h"
  41 #include "host.h"
  42 #include "sdio_bus.h"
  43 #include "pwrseq.h"
  44 
  45 #include "mmc_ops.h"
  46 #include "sd_ops.h"
  47 #include "sdio_ops.h"
  48 
  49 /* The max erase timeout, used when host->max_busy_timeout isn't specified */
  50 #define MMC_ERASE_TIMEOUT_MS    (60 * 1000) /* 60 s */
  51 #define SD_DISCARD_TIMEOUT_MS   (250)
  52 
  53 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
  54 
  55 /*
  56  * Enabling software CRCs on the data blocks can be a significant (30%)
  57  * performance cost, and for other reasons may not always be desired.
  58  * So we allow it to be disabled.
  59  */
  60 bool use_spi_crc = true;
  61 module_param(use_spi_crc, bool, 0);
  62 
  63 static int mmc_schedule_delayed_work(struct delayed_work *work,
  64                                      unsigned long delay)
  65 {
  66         /*
  67          * We use the system_freezable_wq, because of two reasons.
  68          * First, it allows several works (not the same work item) to be
  69          * executed simultaneously. Second, the queue becomes frozen when
  70          * userspace becomes frozen during system PM.
  71          */
  72         return queue_delayed_work(system_freezable_wq, work, delay);
  73 }
  74 
  75 #ifdef CONFIG_FAIL_MMC_REQUEST
  76 
  77 /*
  78  * Internal function. Inject random data errors.
  79  * If mmc_data is NULL no errors are injected.
  80  */
  81 static void mmc_should_fail_request(struct mmc_host *host,
  82                                     struct mmc_request *mrq)
  83 {
  84         struct mmc_command *cmd = mrq->cmd;
  85         struct mmc_data *data = mrq->data;
  86         static const int data_errors[] = {
  87                 -ETIMEDOUT,
  88                 -EILSEQ,
  89                 -EIO,
  90         };
  91 
  92         if (!data)
  93                 return;
  94 
  95         if ((cmd && cmd->error) || data->error ||
  96             !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
  97                 return;
  98 
  99         data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
 100         data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
 101 }
 102 
 103 #else /* CONFIG_FAIL_MMC_REQUEST */
 104 
 105 static inline void mmc_should_fail_request(struct mmc_host *host,
 106                                            struct mmc_request *mrq)
 107 {
 108 }
 109 
 110 #endif /* CONFIG_FAIL_MMC_REQUEST */
 111 
 112 static inline void mmc_complete_cmd(struct mmc_request *mrq)
 113 {
 114         if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
 115                 complete_all(&mrq->cmd_completion);
 116 }
 117 
 118 void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
 119 {
 120         if (!mrq->cap_cmd_during_tfr)
 121                 return;
 122 
 123         mmc_complete_cmd(mrq);
 124 
 125         pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
 126                  mmc_hostname(host), mrq->cmd->opcode);
 127 }
 128 EXPORT_SYMBOL(mmc_command_done);
 129 
 130 /**
 131  *      mmc_request_done - finish processing an MMC request
 132  *      @host: MMC host which completed request
 133  *      @mrq: MMC request which completed
 134  *
 135  *      MMC drivers should call this function when they have completed
 136  *      their processing of a request.
 137  */
 138 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
 139 {
 140         struct mmc_command *cmd = mrq->cmd;
 141         int err = cmd->error;
 142 
 143         /* Flag re-tuning needed on CRC errors */
 144         if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
 145             cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
 146             !host->retune_crc_disable &&
 147             (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
 148             (mrq->data && mrq->data->error == -EILSEQ) ||
 149             (mrq->stop && mrq->stop->error == -EILSEQ)))
 150                 mmc_retune_needed(host);
 151 
 152         if (err && cmd->retries && mmc_host_is_spi(host)) {
 153                 if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
 154                         cmd->retries = 0;
 155         }
 156 
 157         if (host->ongoing_mrq == mrq)
 158                 host->ongoing_mrq = NULL;
 159 
 160         mmc_complete_cmd(mrq);
 161 
 162         trace_mmc_request_done(host, mrq);
 163 
 164         /*
 165          * We list various conditions for the command to be considered
 166          * properly done:
 167          *
 168          * - There was no error, OK fine then
 169          * - We are not doing some kind of retry
 170          * - The card was removed (...so just complete everything no matter
 171          *   if there are errors or retries)
 172          */
 173         if (!err || !cmd->retries || mmc_card_removed(host->card)) {
 174                 mmc_should_fail_request(host, mrq);
 175 
 176                 if (!host->ongoing_mrq)
 177                         led_trigger_event(host->led, LED_OFF);
 178 
 179                 if (mrq->sbc) {
 180                         pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
 181                                 mmc_hostname(host), mrq->sbc->opcode,
 182                                 mrq->sbc->error,
 183                                 mrq->sbc->resp[0], mrq->sbc->resp[1],
 184                                 mrq->sbc->resp[2], mrq->sbc->resp[3]);
 185                 }
 186 
 187                 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
 188                         mmc_hostname(host), cmd->opcode, err,
 189                         cmd->resp[0], cmd->resp[1],
 190                         cmd->resp[2], cmd->resp[3]);
 191 
 192                 if (mrq->data) {
 193                         pr_debug("%s:     %d bytes transferred: %d\n",
 194                                 mmc_hostname(host),
 195                                 mrq->data->bytes_xfered, mrq->data->error);
 196                 }
 197 
 198                 if (mrq->stop) {
 199                         pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
 200                                 mmc_hostname(host), mrq->stop->opcode,
 201                                 mrq->stop->error,
 202                                 mrq->stop->resp[0], mrq->stop->resp[1],
 203                                 mrq->stop->resp[2], mrq->stop->resp[3]);
 204                 }
 205         }
 206         /*
 207          * Request starter must handle retries - see
 208          * mmc_wait_for_req_done().
 209          */
 210         if (mrq->done)
 211                 mrq->done(mrq);
 212 }
 213 
 214 EXPORT_SYMBOL(mmc_request_done);
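     /*
      * Illustrative sketch (not part of this file): how a host controller
      * driver typically hands a finished request back to the core from its
      * interrupt handler. struct my_host, my_host_irq() and my_xfer_failed()
      * are hypothetical; mmc_request_done() is the real API being shown.
      *
      *	static irqreturn_t my_host_irq(int irq, void *dev_id)
      *	{
      *		struct my_host *h = dev_id;
      *		struct mmc_request *mrq = h->mrq;
      *
      *		if (my_xfer_failed(h))
      *			mrq->data->error = -EILSEQ;
      *
      *		h->mrq = NULL;
      *		mmc_request_done(h->mmc, mrq);
      *		return IRQ_HANDLED;
      *	}
      */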
 215 
 216 static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 217 {
 218         int err;
 219 
 220         /* Assumes host controller has been runtime resumed by mmc_claim_host */
 221         err = mmc_retune(host);
 222         if (err) {
 223                 mrq->cmd->error = err;
 224                 mmc_request_done(host, mrq);
 225                 return;
 226         }
 227 
 228         /*
 229          * For SDIO rw commands we must wait for the card to stop signalling
 230          * busy, otherwise some SDIO devices won't work properly. I/O abort,
 231          * reset and bus suspend operations are exempt from this wait.
 232          */
 233         if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
 234             host->ops->card_busy) {
 235                 int tries = 500; /* Wait approx 500 ms at maximum */
 236 
 237                 while (host->ops->card_busy(host) && --tries)
 238                         mmc_delay(1);
 239 
 240                 if (tries == 0) {
 241                         mrq->cmd->error = -EBUSY;
 242                         mmc_request_done(host, mrq);
 243                         return;
 244                 }
 245         }
 246 
 247         if (mrq->cap_cmd_during_tfr) {
 248                 host->ongoing_mrq = mrq;
 249                 /*
 250                  * The retry path could come through here without having waited
 251                  * on cmd_completion, so ensure it is reinitialised.
 252                  */
 253                 reinit_completion(&mrq->cmd_completion);
 254         }
 255 
 256         trace_mmc_request_start(host, mrq);
 257 
 258         if (host->cqe_on)
 259                 host->cqe_ops->cqe_off(host);
 260 
 261         host->ops->request(host, mrq);
 262 }
 263 
 264 static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
 265                              bool cqe)
 266 {
 267         if (mrq->sbc) {
 268                 pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
 269                          mmc_hostname(host), mrq->sbc->opcode,
 270                          mrq->sbc->arg, mrq->sbc->flags);
 271         }
 272 
 273         if (mrq->cmd) {
 274                 pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
 275                          mmc_hostname(host), cqe ? "CQE direct " : "",
 276                          mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
 277         } else if (cqe) {
 278                 pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
 279                          mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
 280         }
 281 
 282         if (mrq->data) {
 283                 pr_debug("%s:     blksz %d blocks %d flags %08x "
 284                         "tsac %d ms nsac %d\n",
 285                         mmc_hostname(host), mrq->data->blksz,
 286                         mrq->data->blocks, mrq->data->flags,
 287                         mrq->data->timeout_ns / 1000000,
 288                         mrq->data->timeout_clks);
 289         }
 290 
 291         if (mrq->stop) {
 292                 pr_debug("%s:     CMD%u arg %08x flags %08x\n",
 293                          mmc_hostname(host), mrq->stop->opcode,
 294                          mrq->stop->arg, mrq->stop->flags);
 295         }
 296 }
 297 
 298 static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
 299 {
 300         unsigned int i, sz = 0;
 301         struct scatterlist *sg;
 302 
 303         if (mrq->cmd) {
 304                 mrq->cmd->error = 0;
 305                 mrq->cmd->mrq = mrq;
 306                 mrq->cmd->data = mrq->data;
 307         }
 308         if (mrq->sbc) {
 309                 mrq->sbc->error = 0;
 310                 mrq->sbc->mrq = mrq;
 311         }
 312         if (mrq->data) {
 313                 if (mrq->data->blksz > host->max_blk_size ||
 314                     mrq->data->blocks > host->max_blk_count ||
 315                     mrq->data->blocks * mrq->data->blksz > host->max_req_size)
 316                         return -EINVAL;
 317 
 318                 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
 319                         sz += sg->length;
 320                 if (sz != mrq->data->blocks * mrq->data->blksz)
 321                         return -EINVAL;
 322 
 323                 mrq->data->error = 0;
 324                 mrq->data->mrq = mrq;
 325                 if (mrq->stop) {
 326                         mrq->data->stop = mrq->stop;
 327                         mrq->stop->error = 0;
 328                         mrq->stop->mrq = mrq;
 329                 }
 330         }
 331 
 332         return 0;
 333 }
 334 
 335 int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 336 {
 337         int err;
 338 
 339         init_completion(&mrq->cmd_completion);
 340 
 341         mmc_retune_hold(host);
 342 
 343         if (mmc_card_removed(host->card))
 344                 return -ENOMEDIUM;
 345 
 346         mmc_mrq_pr_debug(host, mrq, false);
 347 
 348         WARN_ON(!host->claimed);
 349 
 350         err = mmc_mrq_prep(host, mrq);
 351         if (err)
 352                 return err;
 353 
 354         led_trigger_event(host->led, LED_FULL);
 355         __mmc_start_request(host, mrq);
 356 
 357         return 0;
 358 }
 359 EXPORT_SYMBOL(mmc_start_request);
 360 
 361 static void mmc_wait_done(struct mmc_request *mrq)
 362 {
 363         complete(&mrq->completion);
 364 }
 365 
 366 static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
 367 {
 368         struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
 369 
 370         /*
 371          * If there is an ongoing transfer, wait for the command line to become
 372          * available.
 373          */
 374         if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
 375                 wait_for_completion(&ongoing_mrq->cmd_completion);
 376 }
 377 
 378 static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
 379 {
 380         int err;
 381 
 382         mmc_wait_ongoing_tfr_cmd(host);
 383 
 384         init_completion(&mrq->completion);
 385         mrq->done = mmc_wait_done;
 386 
 387         err = mmc_start_request(host, mrq);
 388         if (err) {
 389                 mrq->cmd->error = err;
 390                 mmc_complete_cmd(mrq);
 391                 complete(&mrq->completion);
 392         }
 393 
 394         return err;
 395 }
 396 
 397 void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
 398 {
 399         struct mmc_command *cmd;
 400 
 401         while (1) {
 402                 wait_for_completion(&mrq->completion);
 403 
 404                 cmd = mrq->cmd;
 405 
 406                 /*
 407                  * If the host has timed out waiting for the sanitize
 408                  * to complete, the card might still be in programming state,
 409                  * so let's try to bring the card out of programming
 410                  * state.
 411                  */
 412                 if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
 413                         if (!mmc_interrupt_hpi(host->card)) {
 414                                 pr_warn("%s: %s: Interrupted sanitize\n",
 415                                         mmc_hostname(host), __func__);
 416                                 cmd->error = 0;
 417                                 break;
 418                         } else {
 419                                 pr_err("%s: %s: Failed to interrupt sanitize\n",
 420                                        mmc_hostname(host), __func__);
 421                         }
 422                 }
 423                 if (!cmd->error || !cmd->retries ||
 424                     mmc_card_removed(host->card))
 425                         break;
 426 
 427                 mmc_retune_recheck(host);
 428 
 429                 pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
 430                          mmc_hostname(host), cmd->opcode, cmd->error);
 431                 cmd->retries--;
 432                 cmd->error = 0;
 433                 __mmc_start_request(host, mrq);
 434         }
 435 
 436         mmc_retune_release(host);
 437 }
 438 EXPORT_SYMBOL(mmc_wait_for_req_done);
 439 
 440 /*
 441  * mmc_cqe_start_req - Start a CQE request.
 442  * @host: MMC host to start the request
 443  * @mrq: request to start
 444  *
 445  * Start the request, re-tuning if needed and it is possible. Returns an error
 446  * code if the request fails to start or -EBUSY if CQE is busy.
 447  */
 448 int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
 449 {
 450         int err;
 451 
 452         /*
 453          * CQE cannot process re-tuning commands. Caller must hold retuning
 454          * while CQE is in use.  Re-tuning can happen here only when CQE has no
 455          * active requests i.e. this is the first.  Note, re-tuning will call
 456          * ->cqe_off().
 457          */
 458         err = mmc_retune(host);
 459         if (err)
 460                 goto out_err;
 461 
 462         mrq->host = host;
 463 
 464         mmc_mrq_pr_debug(host, mrq, true);
 465 
 466         err = mmc_mrq_prep(host, mrq);
 467         if (err)
 468                 goto out_err;
 469 
 470         err = host->cqe_ops->cqe_request(host, mrq);
 471         if (err)
 472                 goto out_err;
 473 
 474         trace_mmc_request_start(host, mrq);
 475 
 476         return 0;
 477 
 478 out_err:
 479         if (mrq->cmd) {
 480                 pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
 481                          mmc_hostname(host), mrq->cmd->opcode, err);
 482         } else {
 483                 pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
 484                          mmc_hostname(host), mrq->tag, err);
 485         }
 486         return err;
 487 }
 488 EXPORT_SYMBOL(mmc_cqe_start_req);
 489 
 490 /**
 491  *      mmc_cqe_request_done - CQE has finished processing an MMC request
 492  *      @host: MMC host which completed request
 493  *      @mrq: MMC request which completed
 494  *
 495  *      CQE drivers should call this function when they have completed
 496  *      their processing of a request.
 497  */
 498 void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
 499 {
 500         mmc_should_fail_request(host, mrq);
 501 
 502         /* Flag re-tuning needed on CRC errors */
 503         if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
 504             (mrq->data && mrq->data->error == -EILSEQ))
 505                 mmc_retune_needed(host);
 506 
 507         trace_mmc_request_done(host, mrq);
 508 
 509         if (mrq->cmd) {
 510                 pr_debug("%s: CQE req done (direct CMD%u): %d\n",
 511                          mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
 512         } else {
 513                 pr_debug("%s: CQE transfer done tag %d\n",
 514                          mmc_hostname(host), mrq->tag);
 515         }
 516 
 517         if (mrq->data) {
 518                 pr_debug("%s:     %d bytes transferred: %d\n",
 519                          mmc_hostname(host),
 520                          mrq->data->bytes_xfered, mrq->data->error);
 521         }
 522 
 523         mrq->done(mrq);
 524 }
 525 EXPORT_SYMBOL(mmc_cqe_request_done);
 526 
 527 /**
 528  *      mmc_cqe_post_req - CQE post process of a completed MMC request
 529  *      @host: MMC host
 530  *      @mrq: MMC request to be processed
 531  */
 532 void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
 533 {
 534         if (host->cqe_ops->cqe_post_req)
 535                 host->cqe_ops->cqe_post_req(host, mrq);
 536 }
 537 EXPORT_SYMBOL(mmc_cqe_post_req);
 538 
 539 /* Arbitrary 1 second timeout */
 540 #define MMC_CQE_RECOVERY_TIMEOUT        1000
 541 
 542 /*
 543  * mmc_cqe_recovery - Recover from CQE errors.
 544  * @host: MMC host to recover
 545  *
 546  * Recovery consists of stopping CQE, stopping eMMC, discarding the queue
 547  * in eMMC, and discarding the queue in CQE. CQE must call
 548  * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
 549  * fails to discard its queue.
 550  */
 551 int mmc_cqe_recovery(struct mmc_host *host)
 552 {
 553         struct mmc_command cmd;
 554         int err;
 555 
 556         mmc_retune_hold_now(host);
 557 
 558         /*
 559          * Recovery is expected to happen seldom, if at all, but it reduces
 560          * performance, so make sure it is not completely silent.
 561          */
 562         pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
 563 
 564         host->cqe_ops->cqe_recovery_start(host);
 565 
 566         memset(&cmd, 0, sizeof(cmd));
 567         cmd.opcode       = MMC_STOP_TRANSMISSION;
 568         cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC;
 569         cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
 570         cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
 571         mmc_wait_for_cmd(host, &cmd, 0);
 572 
 573         memset(&cmd, 0, sizeof(cmd));
 574         cmd.opcode       = MMC_CMDQ_TASK_MGMT;
 575         cmd.arg          = 1; /* Discard entire queue */
 576         cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC;
 577         cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
 578         cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
 579         err = mmc_wait_for_cmd(host, &cmd, 0);
 580 
 581         host->cqe_ops->cqe_recovery_finish(host);
 582 
 583         mmc_retune_release(host);
 584 
 585         return err;
 586 }
 587 EXPORT_SYMBOL(mmc_cqe_recovery);
 588 
 589 /**
 590  *      mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
 591  *      @host: MMC host
 592  *      @mrq: MMC request
 593  *
 594  *      mmc_is_req_done() is used with requests that have
 595  *      mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
 596  *      starting a request and before waiting for it to complete. That is,
 597  *      either in between calls to mmc_start_request(), or after mmc_wait_for_req()
 598  *      and before mmc_wait_for_req_done(). If it is called at other times the
 599  *      result is not meaningful.
 600  */
 601 bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
 602 {
 603         return completion_done(&mrq->completion);
 604 }
 605 EXPORT_SYMBOL(mmc_is_req_done);
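     /*
      * Illustrative sketch of the 'cap_cmd_during_tfr' flow described above.
      * send_status_cmd() is hypothetical and stands for any command that does
      * not use the data lines; the mmc_* calls are the real core APIs.
      *
      *	mrq->cap_cmd_during_tfr = true;
      *	mmc_wait_for_req(host, mrq);
      *	while (!mmc_is_req_done(host, mrq))
      *		send_status_cmd(host);
      *	mmc_wait_for_req_done(host, mrq);
      */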
 606 
 607 /**
 608  *      mmc_wait_for_req - start a request and wait for completion
 609  *      @host: MMC host to start command
 610  *      @mrq: MMC request to start
 611  *
 612  *      Start a new MMC custom command request for a host, and wait
 613  *      for the command to complete. In the case of 'cap_cmd_during_tfr'
 614  *      requests, the transfer is ongoing and the caller can issue further
 615  *      commands that do not use the data lines, and then wait by calling
 616  *      mmc_wait_for_req_done().
 617  *      Does not attempt to parse the response.
 618  */
 619 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
 620 {
 621         __mmc_start_req(host, mrq);
 622 
 623         if (!mrq->cap_cmd_during_tfr)
 624                 mmc_wait_for_req_done(host, mrq);
 625 }
 626 EXPORT_SYMBOL(mmc_wait_for_req);
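     /*
      * Illustrative sketch: issuing a single-block read via mmc_wait_for_req().
      * 'buf' (a 512-byte buffer) and 'blk_addr' are assumed to exist, and the
      * host is assumed to be claimed already.
      *
      *	struct mmc_request mrq = {};
      *	struct mmc_command cmd = {};
      *	struct mmc_data data = {};
      *	struct scatterlist sg;
      *
      *	sg_init_one(&sg, buf, 512);
      *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
      *	cmd.arg = blk_addr;
      *	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
      *	data.blksz = 512;
      *	data.blocks = 1;
      *	data.flags = MMC_DATA_READ;
      *	data.sg = &sg;
      *	data.sg_len = 1;
      *	mmc_set_data_timeout(&data, card);
      *	mrq.cmd = &cmd;
      *	mrq.data = &data;
      *	mmc_wait_for_req(card->host, &mrq);
      */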
 627 
 628 /**
 629  *      mmc_wait_for_cmd - start a command and wait for completion
 630  *      @host: MMC host to start command
 631  *      @cmd: MMC command to start
 632  *      @retries: maximum number of retries
 633  *
 634  *      Start a new MMC command for a host, and wait for the command
 635  *      to complete.  Return any error that occurred while the command
 636  *      was executing.  Do not attempt to parse the response.
 637  */
 638 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
 639 {
 640         struct mmc_request mrq = {};
 641 
 642         WARN_ON(!host->claimed);
 643 
 644         memset(cmd->resp, 0, sizeof(cmd->resp));
 645         cmd->retries = retries;
 646 
 647         mrq.cmd = cmd;
 648         cmd->data = NULL;
 649 
 650         mmc_wait_for_req(host, &mrq);
 651 
 652         return cmd->error;
 653 }
 654 
 655 EXPORT_SYMBOL(mmc_wait_for_cmd);
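     /*
      * Illustrative sketch: sending a bare CMD13 (SEND_STATUS) with
      * mmc_wait_for_cmd(). The host must already be claimed; 'err' and
      * 'status' are assumed locals.
      *
      *	struct mmc_command cmd = {};
      *
      *	cmd.opcode = MMC_SEND_STATUS;
      *	cmd.arg = card->rca << 16;
      *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
      *	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
      *	if (!err)
      *		status = cmd.resp[0];
      */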
 656 
 657 /**
 658  *      mmc_set_data_timeout - set the timeout for a data command
 659  *      @data: data phase for command
 660  *      @card: the MMC card associated with the data transfer
 661  *
 662  *      Computes the data timeout parameters according to the
 663  *      correct algorithm given the card type.
 664  */
 665 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
 666 {
 667         unsigned int mult;
 668 
 669         /*
 670          * SDIO cards only define an upper 1 s limit on access.
 671          */
 672         if (mmc_card_sdio(card)) {
 673                 data->timeout_ns = 1000000000;
 674                 data->timeout_clks = 0;
 675                 return;
 676         }
 677 
 678         /*
 679          * SD cards use a 100 multiplier rather than 10
 680          */
 681         mult = mmc_card_sd(card) ? 100 : 10;
 682 
 683         /*
 684          * Scale up the multiplier (and therefore the timeout) by
 685          * the r2w factor for writes.
 686          */
 687         if (data->flags & MMC_DATA_WRITE)
 688                 mult <<= card->csd.r2w_factor;
 689 
 690         data->timeout_ns = card->csd.taac_ns * mult;
 691         data->timeout_clks = card->csd.taac_clks * mult;
 692 
 693         /*
 694          * SD cards also have an upper limit on the timeout.
 695          */
 696         if (mmc_card_sd(card)) {
 697                 unsigned int timeout_us, limit_us;
 698 
 699                 timeout_us = data->timeout_ns / 1000;
 700                 if (card->host->ios.clock)
 701                         timeout_us += data->timeout_clks * 1000 /
 702                                 (card->host->ios.clock / 1000);
 703 
 704                 if (data->flags & MMC_DATA_WRITE)
 705                         /*
 706                          * The MMC spec states: "It is strongly recommended
 707                          * for hosts to implement more than 500ms
 708                          * timeout value even if the card indicates
 709                          * the 250ms maximum busy length."  Even the
 710                          * previous value of 300ms is known to be
 711                          * insufficient for some cards.
 712                          */
 713                         limit_us = 3000000;
 714                 else
 715                         limit_us = 100000;
 716 
 717                 /*
 718                  * SDHC cards always use these fixed values.
 719                  */
 720                 if (timeout_us > limit_us) {
 721                         data->timeout_ns = limit_us * 1000;
 722                         data->timeout_clks = 0;
 723                 }
 724 
 725                 /* assign limit value if invalid */
 726                 if (timeout_us == 0)
 727                         data->timeout_ns = limit_us * 1000;
 728         }
 729 
 730         /*
 731          * Some cards require longer data read timeout than indicated in CSD.
 732          * Address this by setting the read timeout to a "reasonably high"
 733          * value. For the cards tested, 600ms has proven enough. If necessary,
 734          * this value can be increased if other problematic cards require this.
 735          */
 736         if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
 737                 data->timeout_ns = 600000000;
 738                 data->timeout_clks = 0;
 739         }
 740 
 741         /*
 742          * Some cards need very high timeouts if driven in SPI mode.
 743          * The worst observed timeout was 900ms after writing a
 744          * continuous stream of data until the internal logic
 745          * overflowed.
 746          */
 747         if (mmc_host_is_spi(card->host)) {
 748                 if (data->flags & MMC_DATA_WRITE) {
 749                         if (data->timeout_ns < 1000000000)
 750                                 data->timeout_ns = 1000000000;  /* 1s */
 751                 } else {
 752                         if (data->timeout_ns < 100000000)
 753                                 data->timeout_ns =  100000000;  /* 100ms */
 754                 }
 755         }
 756 }
 757 EXPORT_SYMBOL(mmc_set_data_timeout);
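     /*
      * Worked example with assumed CSD values: an SD card reporting
      * taac_ns = 1000000 (1 ms) and r2w_factor = 2 gets, for a write,
      * mult = 100 << 2 = 400 and hence timeout_ns = 400 ms. That is within
      * the 3000000 us write limit above, so it is kept; a slower card would
      * be clamped to the limit.
      */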
 758 
 759 /*
 760  * Allow claiming an already claimed host if the context is the same or there is
 761  * no context but the task is the same.
 762  */
 763 static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
 764                                    struct task_struct *task)
 765 {
 766         return host->claimer == ctx ||
 767                (!ctx && task && host->claimer->task == task);
 768 }
 769 
 770 static inline void mmc_ctx_set_claimer(struct mmc_host *host,
 771                                        struct mmc_ctx *ctx,
 772                                        struct task_struct *task)
 773 {
 774         if (!host->claimer) {
 775                 if (ctx)
 776                         host->claimer = ctx;
 777                 else
 778                         host->claimer = &host->default_ctx;
 779         }
 780         if (task)
 781                 host->claimer->task = task;
 782 }
 783 
 784 /**
 785  *      __mmc_claim_host - exclusively claim a host
 786  *      @host: mmc host to claim
 787  *      @ctx: context that claims the host or NULL in which case the default
 788  *      context will be used
 789  *      @abort: whether or not the operation should be aborted
 790  *
 791  *      Claim a host for a set of operations.  If @abort is non-NULL and
 792  *      dereferences to a non-zero value, this will return prematurely with
 793  *      that non-zero value without acquiring the lock.  Otherwise it
 794  *      returns zero with the lock held.
 795  */
 796 int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
 797                      atomic_t *abort)
 798 {
 799         struct task_struct *task = ctx ? NULL : current;
 800         DECLARE_WAITQUEUE(wait, current);
 801         unsigned long flags;
 802         int stop;
 803         bool pm = false;
 804 
 805         might_sleep();
 806 
 807         add_wait_queue(&host->wq, &wait);
 808         spin_lock_irqsave(&host->lock, flags);
 809         while (1) {
 810                 set_current_state(TASK_UNINTERRUPTIBLE);
 811                 stop = abort ? atomic_read(abort) : 0;
 812                 if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
 813                         break;
 814                 spin_unlock_irqrestore(&host->lock, flags);
 815                 schedule();
 816                 spin_lock_irqsave(&host->lock, flags);
 817         }
 818         set_current_state(TASK_RUNNING);
 819         if (!stop) {
 820                 host->claimed = 1;
 821                 mmc_ctx_set_claimer(host, ctx, task);
 822                 host->claim_cnt += 1;
 823                 if (host->claim_cnt == 1)
 824                         pm = true;
 825         } else
 826                 wake_up(&host->wq);
 827         spin_unlock_irqrestore(&host->lock, flags);
 828         remove_wait_queue(&host->wq, &wait);
 829 
 830         if (pm)
 831                 pm_runtime_get_sync(mmc_dev(host));
 832 
 833         return stop;
 834 }
 835 EXPORT_SYMBOL(__mmc_claim_host);
 836 
 837 /**
 838  *      mmc_release_host - release a host
 839  *      @host: mmc host to release
 840  *
 841  *      Release an MMC host, allowing others to claim the host
 842  *      for their operations.
 843  */
 844 void mmc_release_host(struct mmc_host *host)
 845 {
 846         unsigned long flags;
 847 
 848         WARN_ON(!host->claimed);
 849 
 850         spin_lock_irqsave(&host->lock, flags);
 851         if (--host->claim_cnt) {
 852                 /* Release for nested claim */
 853                 spin_unlock_irqrestore(&host->lock, flags);
 854         } else {
 855                 host->claimed = 0;
 856                 host->claimer->task = NULL;
 857                 host->claimer = NULL;
 858                 spin_unlock_irqrestore(&host->lock, flags);
 859                 wake_up(&host->wq);
 860                 pm_runtime_mark_last_busy(mmc_dev(host));
 861                 if (host->caps & MMC_CAP_SYNC_RUNTIME_PM)
 862                         pm_runtime_put_sync_suspend(mmc_dev(host));
 863                 else
 864                         pm_runtime_put_autosuspend(mmc_dev(host));
 865         }
 866 }
 867 EXPORT_SYMBOL(mmc_release_host);
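     /*
      * Illustrative sketch: the usual claim/release bracket around a series of
      * commands. my_card_op() is hypothetical; mmc_claim_host() is the helper
      * that calls __mmc_claim_host() with a NULL context and no abort.
      *
      *	mmc_claim_host(card->host);
      *	err = my_card_op(card);
      *	mmc_release_host(card->host);
      */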
 868 
 869 /*
 870  * This is a helper function, which fetches a runtime pm reference for the
 871  * card device and also claims the host.
 872  */
 873 void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
 874 {
 875         pm_runtime_get_sync(&card->dev);
 876         __mmc_claim_host(card->host, ctx, NULL);
 877 }
 878 EXPORT_SYMBOL(mmc_get_card);
 879 
 880 /*
 881  * This is a helper function, which releases the host and drops the runtime
 882  * pm reference for the card device.
 883  */
 884 void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
 885 {
 886         struct mmc_host *host = card->host;
 887 
 888         WARN_ON(ctx && host->claimer != ctx);
 889 
 890         mmc_release_host(host);
 891         pm_runtime_mark_last_busy(&card->dev);
 892         pm_runtime_put_autosuspend(&card->dev);
 893 }
 894 EXPORT_SYMBOL(mmc_put_card);
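     /*
      * Illustrative sketch: bracketing card access with mmc_get_card() and
      * mmc_put_card() so the card stays runtime-resumed and the host claimed,
      * here pinging the card with CMD13 via the core helper mmc_send_status().
      *
      *	u32 status;
      *
      *	mmc_get_card(card, NULL);
      *	err = mmc_send_status(card, &status);
      *	mmc_put_card(card, NULL);
      */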
 895 
 896 /*
 897  * Internal function that does the actual ios call to the host driver,
 898  * optionally printing some debug output.
 899  */
 900 static inline void mmc_set_ios(struct mmc_host *host)
 901 {
 902         struct mmc_ios *ios = &host->ios;
 903 
 904         pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
 905                 "width %u timing %u\n",
 906                  mmc_hostname(host), ios->clock, ios->bus_mode,
 907                  ios->power_mode, ios->chip_select, ios->vdd,
 908                  1 << ios->bus_width, ios->timing);
 909 
 910         host->ops->set_ios(host, ios);
 911 }
 912 
 913 /*
 914  * Control chip select pin on a host.
 915  */
 916 void mmc_set_chip_select(struct mmc_host *host, int mode)
 917 {
 918         host->ios.chip_select = mode;
 919         mmc_set_ios(host);
 920 }
 921 
 922 /*
 923  * Sets the host clock to the highest possible frequency that
 924  * does not exceed "hz".
 925  */
 926 void mmc_set_clock(struct mmc_host *host, unsigned int hz)
 927 {
 928         WARN_ON(hz && hz < host->f_min);
 929 
 930         if (hz > host->f_max)
 931                 hz = host->f_max;
 932 
 933         host->ios.clock = hz;
 934         mmc_set_ios(host);
 935 }
 936 
 937 int mmc_execute_tuning(struct mmc_card *card)
 938 {
 939         struct mmc_host *host = card->host;
 940         u32 opcode;
 941         int err;
 942 
 943         if (!host->ops->execute_tuning)
 944                 return 0;
 945 
 946         if (host->cqe_on)
 947                 host->cqe_ops->cqe_off(host);
 948 
 949         if (mmc_card_mmc(card))
 950                 opcode = MMC_SEND_TUNING_BLOCK_HS200;
 951         else
 952                 opcode = MMC_SEND_TUNING_BLOCK;
 953 
 954         err = host->ops->execute_tuning(host, opcode);
 955 
 956         if (err)
 957                 pr_err("%s: tuning execution failed: %d\n",
 958                         mmc_hostname(host), err);
 959         else
 960                 mmc_retune_enable(host);
 961 
 962         return err;
 963 }
 964 
 965 /*
 966  * Change the bus mode (open drain/push-pull) of a host.
 967  */
 968 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
 969 {
 970         host->ios.bus_mode = mode;
 971         mmc_set_ios(host);
 972 }
 973 
 974 /*
 975  * Change data bus width of a host.
 976  */
 977 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
 978 {
 979         host->ios.bus_width = width;
 980         mmc_set_ios(host);
 981 }
 982 
 983 /*
 984  * Set initial state after a power cycle or a hw_reset.
 985  */
 986 void mmc_set_initial_state(struct mmc_host *host)
 987 {
 988         if (host->cqe_on)
 989                 host->cqe_ops->cqe_off(host);
 990 
 991         mmc_retune_disable(host);
 992 
 993         if (mmc_host_is_spi(host))
 994                 host->ios.chip_select = MMC_CS_HIGH;
 995         else
 996                 host->ios.chip_select = MMC_CS_DONTCARE;
 997         host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
 998         host->ios.bus_width = MMC_BUS_WIDTH_1;
 999         host->ios.timing = MMC_TIMING_LEGACY;
1000         host->ios.drv_type = 0;
1001         host->ios.enhanced_strobe = false;
1002 
1003         /*
1004          * Make sure we are in non-enhanced strobe mode before we
1005          * actually enable it in ext_csd.
1006          */
1007         if ((host->caps2 & MMC_CAP2_HS400_ES) &&
1008              host->ops->hs400_enhanced_strobe)
1009                 host->ops->hs400_enhanced_strobe(host, &host->ios);
1010 
1011         mmc_set_ios(host);
1012 }
1013 
1014 /**
1015  * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1016  * @vdd:        voltage (mV)
1017  * @low_bits:   prefer low bits in boundary cases
1018  *
1019  * This function returns the OCR bit number according to the provided @vdd
1020  * value. If conversion is not possible, a negative errno value is returned.
1021  *
1022  * Depending on the @low_bits flag the function prefers low or high OCR bits
1023  * on boundary voltages. For example,
1024  * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1025  * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1026  *
1027  * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
1028  */
1029 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1030 {
1031         const int max_bit = ilog2(MMC_VDD_35_36);
1032         int bit;
1033 
1034         if (vdd < 1650 || vdd > 3600)
1035                 return -EINVAL;
1036 
1037         if (vdd >= 1650 && vdd <= 1950)
1038                 return ilog2(MMC_VDD_165_195);
1039 
1040         if (low_bits)
1041                 vdd -= 1;
1042 
1043         /* Base 2000 mV, step 100 mV, bit's base 8. */
1044         bit = (vdd - 2000) / 100 + 8;
1045         if (bit > max_bit)
1046                 return max_bit;
1047         return bit;
1048 }
1049 
1050 /**
1051  * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1052  * @vdd_min:    minimum voltage value (mV)
1053  * @vdd_max:    maximum voltage value (mV)
1054  *
1055  * This function returns the OCR mask bits according to the provided @vdd_min
1056  * and @vdd_max values. If conversion is not possible the function returns 0.
1057  *
1058  * Notes wrt boundary cases:
1059  * This function sets the OCR bits for all boundary voltages, for example
1060  * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1061  * MMC_VDD_34_35 mask.
1062  */
1063 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1064 {
1065         u32 mask = 0;
1066 
1067         if (vdd_max < vdd_min)
1068                 return 0;
1069 
1070         /* Prefer high bits for the boundary vdd_max values. */
1071         vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1072         if (vdd_max < 0)
1073                 return 0;
1074 
1075         /* Prefer low bits for the boundary vdd_min values. */
1076         vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1077         if (vdd_min < 0)
1078                 return 0;
1079 
1080         /* Fill the mask, from max bit to min bit. */
1081         while (vdd_max >= vdd_min)
1082                 mask |= 1 << vdd_max--;
1083 
1084         return mask;
1085 }
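     /*
      * Worked example: mmc_vddrange_to_ocrmask(3300, 3400) maps vdd_max to
      * bit 22 (MMC_VDD_34_35, preferring high bits) and vdd_min to bit 20
      * (MMC_VDD_32_33, preferring low bits), so the returned mask is
      * MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35.
      */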
1086 
1087 static int mmc_of_get_func_num(struct device_node *node)
1088 {
1089         u32 reg;
1090         int ret;
1091 
1092         ret = of_property_read_u32(node, "reg", &reg);
1093         if (ret < 0)
1094                 return ret;
1095 
1096         return reg;
1097 }
1098 
1099 struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1100                 unsigned func_num)
1101 {
1102         struct device_node *node;
1103 
1104         if (!host->parent || !host->parent->of_node)
1105                 return NULL;
1106 
1107         for_each_child_of_node(host->parent->of_node, node) {
1108                 if (mmc_of_get_func_num(node) == func_num)
1109                         return node;
1110         }
1111 
1112         return NULL;
1113 }
1114 
1115 /*
1116  * Mask off any voltages we don't support and select
1117  * the lowest voltage
1118  */
1119 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1120 {
1121         int bit;
1122 
1123         /*
1124          * Sanity check the voltages that the card claims to
1125          * support.
1126          */
1127         if (ocr & 0x7F) {
1128                 dev_warn(mmc_dev(host),
1129                 "card claims to support voltages below defined range\n");
1130                 ocr &= ~0x7F;
1131         }
1132 
1133         ocr &= host->ocr_avail;
1134         if (!ocr) {
1135                 dev_warn(mmc_dev(host), "no support for card's volts\n");
1136                 return 0;
1137         }
1138 
1139         if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1140                 bit = ffs(ocr) - 1;
1141                 ocr &= 3 << bit;
1142                 mmc_power_cycle(host, ocr);
1143         } else {
1144                 bit = fls(ocr) - 1;
1145                 ocr &= 3 << bit;
1146                 if (bit != host->ios.vdd)
1147                         dev_warn(mmc_dev(host), "exceeding card's volts\n");
1148         }
1149 
1150         return ocr;
1151 }
1152 
1153 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1154 {
1155         int err = 0;
1156         int old_signal_voltage = host->ios.signal_voltage;
1157 
1158         host->ios.signal_voltage = signal_voltage;
1159         if (host->ops->start_signal_voltage_switch)
1160                 err = host->ops->start_signal_voltage_switch(host, &host->ios);
1161 
1162         if (err)
1163                 host->ios.signal_voltage = old_signal_voltage;
1164 
1165         return err;
1166 
1167 }
1168 
1169 void mmc_set_initial_signal_voltage(struct mmc_host *host)
1170 {
1171         /* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
1172         if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
1173                 dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1174         else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
1175                 dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1176         else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
1177                 dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1178 }
1179 
1180 int mmc_host_set_uhs_voltage(struct mmc_host *host)
1181 {
1182         u32 clock;
1183 
1184         /*
1185          * During a signal voltage level switch, the clock must be gated
1186          * for 5 ms according to the SD spec
1187          */
1188         clock = host->ios.clock;
1189         host->ios.clock = 0;
1190         mmc_set_ios(host);
1191 
1192         if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
1193                 return -EAGAIN;
1194 
1195         /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
1196         mmc_delay(10);
1197         host->ios.clock = clock;
1198         mmc_set_ios(host);
1199 
1200         return 0;
1201 }
1202 
1203 int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
1204 {
1205         struct mmc_command cmd = {};
1206         int err = 0;
1207 
1208         /*
1209          * If we cannot switch voltages, return failure so the caller
1210          * can continue without UHS mode
1211          */
1212         if (!host->ops->start_signal_voltage_switch)
1213                 return -EPERM;
1214         if (!host->ops->card_busy)
1215                 pr_warn("%s: cannot verify signal voltage switch\n",
1216                         mmc_hostname(host));
1217 
1218         cmd.opcode = SD_SWITCH_VOLTAGE;
1219         cmd.arg = 0;
1220         cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1221 
1222         err = mmc_wait_for_cmd(host, &cmd, 0);
1223         if (err)
1224                 return err;
1225 
1226         if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1227                 return -EIO;
1228 
1229         /*
1230          * The card should drive cmd and dat[0:3] low immediately
1231          * after the response of cmd11, but wait 1 ms to be sure
1232          */
1233         mmc_delay(1);
1234         if (host->ops->card_busy && !host->ops->card_busy(host)) {
1235                 err = -EAGAIN;
1236                 goto power_cycle;
1237         }
1238 
1239         if (mmc_host_set_uhs_voltage(host)) {
1240                 /*
1241                  * Voltages may not have been switched, but we've already
1242                  * sent CMD11, so a power cycle is required anyway
1243                  */
1244                 err = -EAGAIN;
1245                 goto power_cycle;
1246         }
1247 
1248         /* Wait for at least 1 ms according to spec */
1249         mmc_delay(1);
1250 
1251         /*
1252          * Failure to switch is indicated by the card holding
1253          * dat[0:3] low
1254          */
1255         if (host->ops->card_busy && host->ops->card_busy(host))
1256                 err = -EAGAIN;
1257 
1258 power_cycle:
1259         if (err) {
1260                 pr_debug("%s: Signal voltage switch failed, "
1261                         "power cycling card\n", mmc_hostname(host));
1262                 mmc_power_cycle(host, ocr);
1263         }
1264 
1265         return err;
1266 }
1267 
1268 /*
1269  * Select timing parameters for host.
1270  */
1271 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1272 {
1273         host->ios.timing = timing;
1274         mmc_set_ios(host);
1275 }
1276 
1277 /*
1278  * Select appropriate driver type for host.
1279  */
1280 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1281 {
1282         host->ios.drv_type = drv_type;
1283         mmc_set_ios(host);
1284 }
1285 
1286 int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1287                               int card_drv_type, int *drv_type)
1288 {
1289         struct mmc_host *host = card->host;
1290         int host_drv_type = SD_DRIVER_TYPE_B;
1291 
1292         *drv_type = 0;
1293 
1294         if (!host->ops->select_drive_strength)
1295                 return 0;
1296 
1297         /* Use SD definition of driver strength for hosts */
1298         if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1299                 host_drv_type |= SD_DRIVER_TYPE_A;
1300 
1301         if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1302                 host_drv_type |= SD_DRIVER_TYPE_C;
1303 
1304         if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1305                 host_drv_type |= SD_DRIVER_TYPE_D;
1306 
1307         /*
1308          * The drive strength that the hardware can support
1309          * depends on the board design.  Pass the appropriate
1310          * information and let the hardware specific code
1311          * return what is possible given the options
1312          */
1313         return host->ops->select_drive_strength(card, max_dtr,
1314                                                 host_drv_type,
1315                                                 card_drv_type,
1316                                                 drv_type);
1317 }
1318 
1319 /*
1320  * Apply power to the MMC stack.  This is a two-stage process.
1321  * First, we enable power to the card without the clock running.
1322  * We then wait a bit for the power to stabilise.  Finally,
1323  * enable the bus drivers and clock to the card.
1324  *
1325  * We must _NOT_ enable the clock prior to power stablising.
1326  *
1327  * If a host does all the power sequencing itself, ignore the
1328  * initial MMC_POWER_UP stage.
1329  */
1330 void mmc_power_up(struct mmc_host *host, u32 ocr)
1331 {
1332         if (host->ios.power_mode == MMC_POWER_ON)
1333                 return;
1334 
1335         mmc_pwrseq_pre_power_on(host);
1336 
1337         host->ios.vdd = fls(ocr) - 1;
1338         host->ios.power_mode = MMC_POWER_UP;
1339         /* Set initial state and call mmc_set_ios */
1340         mmc_set_initial_state(host);
1341 
1342         mmc_set_initial_signal_voltage(host);
1343 
1344         /*
1345          * This delay should be sufficient to allow the power supply
1346          * to reach the minimum voltage.
1347          */
1348         mmc_delay(host->ios.power_delay_ms);
1349 
1350         mmc_pwrseq_post_power_on(host);
1351 
1352         host->ios.clock = host->f_init;
1353 
1354         host->ios.power_mode = MMC_POWER_ON;
1355         mmc_set_ios(host);
1356 
1357         /*
1358          * This delay must be at least 74 clock sizes, or 1 ms, or the
1359          * time required to reach a stable voltage.
1360          */
1361         mmc_delay(host->ios.power_delay_ms);
1362 }
1363 
1364 void mmc_power_off(struct mmc_host *host)
1365 {
1366         if (host->ios.power_mode == MMC_POWER_OFF)
1367                 return;
1368 
1369         mmc_pwrseq_power_off(host);
1370 
1371         host->ios.clock = 0;
1372         host->ios.vdd = 0;
1373 
1374         host->ios.power_mode = MMC_POWER_OFF;
1375         /* Set initial state and call mmc_set_ios */
1376         mmc_set_initial_state(host);
1377 
1378         /*
1379          * Some configurations, such as the 802.11 SDIO card in the OLPC
1380          * XO-1.5, require a short delay after poweroff before the card
1381          * can be successfully turned on again.
1382          */
1383         mmc_delay(1);
1384 }
1385 
1386 void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1387 {
1388         mmc_power_off(host);
1389         /* Wait at least 1 ms according to SD spec */
1390         mmc_delay(1);
1391         mmc_power_up(host, ocr);
1392 }
1393 
1394 /*
1395  * Cleanup when the last reference to the bus operator is dropped.
1396  */
1397 static void __mmc_release_bus(struct mmc_host *host)
1398 {
1399         WARN_ON(!host->bus_dead);
1400 
1401         host->bus_ops = NULL;
1402 }
1403 
1404 /*
1405  * Increase reference count of bus operator
1406  */
1407 static inline void mmc_bus_get(struct mmc_host *host)
1408 {
1409         unsigned long flags;
1410 
1411         spin_lock_irqsave(&host->lock, flags);
1412         host->bus_refs++;
1413         spin_unlock_irqrestore(&host->lock, flags);
1414 }
1415 
1416 /*
1417  * Decrease reference count of bus operator and free it if
1418  * it is the last reference.
1419  */
1420 static inline void mmc_bus_put(struct mmc_host *host)
1421 {
1422         unsigned long flags;
1423 
1424         spin_lock_irqsave(&host->lock, flags);
1425         host->bus_refs--;
1426         if ((host->bus_refs == 0) && host->bus_ops)
1427                 __mmc_release_bus(host);
1428         spin_unlock_irqrestore(&host->lock, flags);
1429 }
1430 
1431 /*
1432  * Assign a mmc bus handler to a host. Only one bus handler may control a
1433  * host at any given time.
1434  */
1435 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1436 {
1437         unsigned long flags;
1438 
1439         WARN_ON(!host->claimed);
1440 
1441         spin_lock_irqsave(&host->lock, flags);
1442 
1443         WARN_ON(host->bus_ops);
1444         WARN_ON(host->bus_refs);
1445 
1446         host->bus_ops = ops;
1447         host->bus_refs = 1;
1448         host->bus_dead = 0;
1449 
1450         spin_unlock_irqrestore(&host->lock, flags);
1451 }
1452 
1453 /*
1454  * Remove the current bus handler from a host.
1455  */
1456 void mmc_detach_bus(struct mmc_host *host)
1457 {
1458         unsigned long flags;
1459 
1460         WARN_ON(!host->claimed);
1461         WARN_ON(!host->bus_ops);
1462 
1463         spin_lock_irqsave(&host->lock, flags);
1464 
1465         host->bus_dead = 1;
1466 
1467         spin_unlock_irqrestore(&host->lock, flags);
1468 
1469         mmc_bus_put(host);
1470 }
1471 
1472 void _mmc_detect_change(struct mmc_host *host, unsigned long delay, bool cd_irq)
1473 {
1474         /*
1475          * If the device is configured as wakeup, we prevent a new sleep for
1476          * 5 s to give user space time to consume the event.
1477          */
1478         if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1479                 device_can_wakeup(mmc_dev(host)))
1480                 pm_wakeup_event(mmc_dev(host), 5000);
1481 
1482         host->detect_change = 1;
1483         mmc_schedule_delayed_work(&host->detect, delay);
1484 }
1485 
1486 /**
1487  *      mmc_detect_change - process change of state on a MMC socket
1488  *      @host: host which changed state.
1489  *      @delay: optional delay to wait before detection (jiffies)
1490  *
1491  *      MMC drivers should call this when they detect a card has been
1492  *      inserted or removed. The MMC layer will confirm that any
1493  *      present card is still functional, and initialize any newly
1494  *      inserted.
1495  */
1496 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1497 {
1498         _mmc_detect_change(host, delay, true);
1499 }
1500 EXPORT_SYMBOL(mmc_detect_change);
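     /*
      * Illustrative sketch: a host driver debouncing a card-detect interrupt.
      * my_cd_irq() and the 200 ms debounce period are hypothetical.
      *
      *	static irqreturn_t my_cd_irq(int irq, void *dev_id)
      *	{
      *		struct mmc_host *mmc = dev_id;
      *
      *		mmc_detect_change(mmc, msecs_to_jiffies(200));
      *		return IRQ_HANDLED;
      *	}
      */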
1501 
1502 void mmc_init_erase(struct mmc_card *card)
1503 {
1504         unsigned int sz;
1505 
1506         if (is_power_of_2(card->erase_size))
1507                 card->erase_shift = ffs(card->erase_size) - 1;
1508         else
1509                 card->erase_shift = 0;
1510 
1511         /*
1512          * It is possible to erase an arbitrarily large area of an SD or MMC
1513          * card.  That is not desirable because it can take a long time
1514          * (minutes) potentially delaying more important I/O, and also the
1515          * timeout calculations become increasingly over-estimated.
1516          * Consequently, 'pref_erase' is defined as a guide to limit erases
1517          * to that size and alignment.
1518          *
1519          * For SD cards that define Allocation Unit size, limit erases to one
1520          * Allocation Unit at a time.
1521          * For MMC, have a stab at a good value and for modern cards it will
1522          * end up being 4MiB. Note that if the value is too small, it can end
1523          * up taking longer to erase. Also note, erase_size is already set to
1524          * High Capacity Erase Size if available when this function is called.
1525          */
1526         if (mmc_card_sd(card) && card->ssr.au) {
1527                 card->pref_erase = card->ssr.au;
1528                 card->erase_shift = ffs(card->ssr.au) - 1;
1529         } else if (card->erase_size) {
1530                 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1531                 if (sz < 128)
1532                         card->pref_erase = 512 * 1024 / 512;
1533                 else if (sz < 512)
1534                         card->pref_erase = 1024 * 1024 / 512;
1535                 else if (sz < 1024)
1536                         card->pref_erase = 2 * 1024 * 1024 / 512;
1537                 else
1538                         card->pref_erase = 4 * 1024 * 1024 / 512;
1539                 if (card->pref_erase < card->erase_size)
1540                         card->pref_erase = card->erase_size;
1541                 else {
1542                         sz = card->pref_erase % card->erase_size;
1543                         if (sz)
1544                                 card->pref_erase += card->erase_size - sz;
1545                 }
1546         } else
1547                 card->pref_erase = 0;
1548 }
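
/*
 * Worked example (editorial note, hypothetical card): 'sz' above is the
 * card size in MiB, since capacity << (read_blkbits - 9) yields 512-byte
 * sectors and the subsequent >> 11 converts sectors to MiB. For an 8 GiB
 * eMMC, sz = 8192 >= 1024, so pref_erase becomes 4 MiB / 512 = 8192
 * sectors, rounded up to a multiple of erase_size if not already aligned.
 */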
1549 
1550 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1551                                           unsigned int arg, unsigned int qty)
1552 {
1553         unsigned int erase_timeout;
1554 
1555         if (arg == MMC_DISCARD_ARG ||
1556             (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1557                 erase_timeout = card->ext_csd.trim_timeout;
1558         } else if (card->ext_csd.erase_group_def & 1) {
1559                 /* High Capacity Erase Group Size uses HC timeouts */
1560                 if (arg == MMC_TRIM_ARG)
1561                         erase_timeout = card->ext_csd.trim_timeout;
1562                 else
1563                         erase_timeout = card->ext_csd.hc_erase_timeout;
1564         } else {
1565                 /* CSD Erase Group Size uses write timeout */
1566                 unsigned int mult = (10 << card->csd.r2w_factor);
1567                 unsigned int timeout_clks = card->csd.taac_clks * mult;
1568                 unsigned int timeout_us;
1569 
1570                 /* Avoid overflow: e.g. taac_ns=80000000 mult=1280 */
1571                 if (card->csd.taac_ns < 1000000)
1572                         timeout_us = (card->csd.taac_ns * mult) / 1000;
1573                 else
1574                         timeout_us = (card->csd.taac_ns / 1000) * mult;
1575 
1576                 /*
1577                  * ios.clock is only a target.  The real clock rate might be
1578                  * less but not that much less, so fudge it by multiplying by 2.
1579                  */
1580                 timeout_clks <<= 1;
1581                 timeout_us += (timeout_clks * 1000) /
1582                               (card->host->ios.clock / 1000);
1583 
1584                 erase_timeout = timeout_us / 1000;
1585 
1586                 /*
1587                  * Theoretically, the calculation could underflow, so round
1588                  * up to 1 ms in that case.
1589                  */
1590                 if (!erase_timeout)
1591                         erase_timeout = 1;
1592         }
1593 
1594         /* Multiplier for secure operations */
1595         if (arg & MMC_SECURE_ARGS) {
1596                 if (arg == MMC_SECURE_ERASE_ARG)
1597                         erase_timeout *= card->ext_csd.sec_erase_mult;
1598                 else
1599                         erase_timeout *= card->ext_csd.sec_trim_mult;
1600         }
1601 
1602         erase_timeout *= qty;
1603 
1604         /*
1605          * Ensure at least a 1 second timeout for SPI as per
1606          * 'mmc_set_data_timeout()'
1607          */
1608         if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1609                 erase_timeout = 1000;
1610 
1611         return erase_timeout;
1612 }
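
/*
 * Worked example (editorial note, hypothetical CSD values): with
 * r2w_factor = 4, mult = 10 << 4 = 160; with taac_clks = 100,
 * timeout_clks = 100 * 160 = 16000, doubled to 32000. Since
 * taac_ns = 1500000 is not below 1000000, timeout_us = 1500 * 160 =
 * 240000. At a 52 MHz clock, timeout_us += 32000 * 1000 / 52000 ~= 615,
 * so each erase group costs roughly 240 ms before the qty and secure
 * multipliers are applied.
 */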
1613 
1614 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1615                                          unsigned int arg,
1616                                          unsigned int qty)
1617 {
1618         unsigned int erase_timeout;
1619 
1620         /* For DISCARD, none of the calculation below applies:
1621          * the busy timeout is 250 ms per discard command.
1622          */
1623         if (arg == SD_DISCARD_ARG)
1624                 return SD_DISCARD_TIMEOUT_MS;
1625 
1626         if (card->ssr.erase_timeout) {
1627                 /* Erase timeout specified in SD Status Register (SSR) */
1628                 erase_timeout = card->ssr.erase_timeout * qty +
1629                                 card->ssr.erase_offset;
1630         } else {
1631                 /*
1632                  * Erase timeout not specified in SD Status Register (SSR) so
1633                  * use 250ms per write block.
1634                  */
1635                 erase_timeout = 250 * qty;
1636         }
1637 
1638         /* Must not be less than 1 second */
1639         if (erase_timeout < 1000)
1640                 erase_timeout = 1000;
1641 
1642         return erase_timeout;
1643 }
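
/*
 * Worked example (editorial note, hypothetical SSR values): with
 * ssr.erase_timeout = 250 ms per AU and ssr.erase_offset = 50 ms,
 * erasing qty = 4 allocation units yields 250 * 4 + 50 = 1050 ms.
 * Without SSR timeout info, 250 ms per write block is assumed; either
 * way the result is clamped to no less than 1000 ms.
 */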
1644 
1645 static unsigned int mmc_erase_timeout(struct mmc_card *card,
1646                                       unsigned int arg,
1647                                       unsigned int qty)
1648 {
1649         if (mmc_card_sd(card))
1650                 return mmc_sd_erase_timeout(card, arg, qty);
1651         else
1652                 return mmc_mmc_erase_timeout(card, arg, qty);
1653 }
1654 
1655 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1656                         unsigned int to, unsigned int arg)
1657 {
1658         struct mmc_command cmd = {};
1659         unsigned int qty = 0, busy_timeout = 0;
1660         bool use_r1b_resp = false;
1661         unsigned long timeout;
1662         int loop_udelay = 64, udelay_max = 32768;
1663         int err;
1664 
1665         mmc_retune_hold(card->host);
1666 
1667         /*
1668          * qty is used to calculate the erase timeout which depends on how many
1669          * erase groups (or allocation units in SD terminology) are affected.
1670          * We count erasing part of an erase group as one erase group.
1671          * For SD, the allocation units are always a power of 2.  For MMC, the
1672          * erase group size is almost certainly also a power of 2, but the
1673          * JEDEC standard does not seem to insist on that, so we fall back to
1674          * division in that case.  SD may not specify an allocation unit size,
1675          * in which case the timeout is based on the number of write blocks.
1676          *
1677          * Note that the timeout for secure trim 2 will only be correct if the
1678          * number of erase groups specified is the same as the total of all
1679          * preceding secure trim 1 commands.  Since the power may have been
1680          * lost since the secure trim 1 commands occurred, it is generally
1681          * impossible to calculate the secure trim 2 timeout correctly.
1682          */
1683         if (card->erase_shift)
1684                 qty += ((to >> card->erase_shift) -
1685                         (from >> card->erase_shift)) + 1;
1686         else if (mmc_card_sd(card))
1687                 qty += to - from + 1;
1688         else
1689                 qty += ((to / card->erase_size) -
1690                         (from / card->erase_size)) + 1;
1691 
1692         if (!mmc_card_blockaddr(card)) {
1693                 from <<= 9;
1694                 to <<= 9;
1695         }
1696 
1697         if (mmc_card_sd(card))
1698                 cmd.opcode = SD_ERASE_WR_BLK_START;
1699         else
1700                 cmd.opcode = MMC_ERASE_GROUP_START;
1701         cmd.arg = from;
1702         cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1703         err = mmc_wait_for_cmd(card->host, &cmd, 0);
1704         if (err) {
1705                 pr_err("mmc_erase: group start error %d, status %#x\n",
1706                        err, cmd.resp[0]);
1707                 err = -EIO;
1708                 goto out;
1709         }
1710 
1711         memset(&cmd, 0, sizeof(struct mmc_command));
1712         if (mmc_card_sd(card))
1713                 cmd.opcode = SD_ERASE_WR_BLK_END;
1714         else
1715                 cmd.opcode = MMC_ERASE_GROUP_END;
1716         cmd.arg = to;
1717         cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1718         err = mmc_wait_for_cmd(card->host, &cmd, 0);
1719         if (err) {
1720                 pr_err("mmc_erase: group end error %d, status %#x\n",
1721                        err, cmd.resp[0]);
1722                 err = -EIO;
1723                 goto out;
1724         }
1725 
1726         memset(&cmd, 0, sizeof(struct mmc_command));
1727         cmd.opcode = MMC_ERASE;
1728         cmd.arg = arg;
1729         busy_timeout = mmc_erase_timeout(card, arg, qty);
1730         /*
1731          * If the host controller supports busy signalling and the timeout for
1732          * the erase operation does not exceed the max_busy_timeout, we should
1733          * use an R1B response. Otherwise, we need to prevent the host from
1734          * doing hw busy detection, which is done by converting to an R1
1735          * response instead. Note, some hosts require R1B, which also means
1736          * they are on their own when it comes to dealing with the busy timeout.
1737          */
1738         if (!(card->host->caps & MMC_CAP_NEED_RSP_BUSY) &&
1739             card->host->max_busy_timeout &&
1740             busy_timeout > card->host->max_busy_timeout) {
1741                 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1742         } else {
1743                 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1744                 cmd.busy_timeout = busy_timeout;
1745                 use_r1b_resp = true;
1746         }
1747 
1748         err = mmc_wait_for_cmd(card->host, &cmd, 0);
1749         if (err) {
1750                 pr_err("mmc_erase: erase error %d, status %#x\n",
1751                        err, cmd.resp[0]);
1752                 err = -EIO;
1753                 goto out;
1754         }
1755 
1756         if (mmc_host_is_spi(card->host))
1757                 goto out;
1758 
1759         /*
1760          * When R1B is used together with MMC_CAP_WAIT_WHILE_BUSY, the host
1761          * waits for busy completion itself, so polling shall be avoided.
1762          */
1763         if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
1764                 goto out;
1765 
1766         timeout = jiffies + msecs_to_jiffies(busy_timeout);
1767         do {
1768                 memset(&cmd, 0, sizeof(struct mmc_command));
1769                 cmd.opcode = MMC_SEND_STATUS;
1770                 cmd.arg = card->rca << 16;
1771                 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1772                 /* Do not retry else we can't see errors */
1773                 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1774                 if (err || R1_STATUS(cmd.resp[0])) {
1775                         pr_err("error %d requesting status %#x\n",
1776                                 err, cmd.resp[0]);
1777                         err = -EIO;
1778                         goto out;
1779                 }
1780 
1781                 /* Time out if the device never becomes ready for data
1782                  * and never leaves the programming state.
1783                  */
1784                 if (time_after(jiffies, timeout)) {
1785                         pr_err("%s: Card stuck in programming state! %s\n",
1786                                 mmc_hostname(card->host), __func__);
1787                         err = -EIO;
1788                         goto out;
1789                 }
1790                 if ((cmd.resp[0] & R1_READY_FOR_DATA) &&
1791                     R1_CURRENT_STATE(cmd.resp[0]) != R1_STATE_PRG)
1792                         break;
1793 
1794                 usleep_range(loop_udelay, loop_udelay * 2);
1795                 if (loop_udelay < udelay_max)
1796                         loop_udelay *= 2;
1797         } while (1);
1798 
1799 out:
1800         mmc_retune_release(card->host);
1801         return err;
1802 }
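
/*
 * Protocol summary (editorial note): the sequence above is CMD35 +
 * CMD36 + CMD38 on eMMC (CMD32 + CMD33 + CMD38 on SD) to set the erase
 * range and kick off the operation, followed by CMD13 (SEND_STATUS)
 * polling with an exponential backoff that starts at 64 us and doubles
 * up to a 32768 us cap, until the card reports READY_FOR_DATA and has
 * left the programming state.
 */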
1803 
1804 static unsigned int mmc_align_erase_size(struct mmc_card *card,
1805                                          unsigned int *from,
1806                                          unsigned int *to,
1807                                          unsigned int nr)
1808 {
1809         unsigned int from_new = *from, nr_new = nr, rem;
1810 
1811         /*
1812          * When the 'card->erase_size' is power of 2, we can use round_up/down()
1813          * to align the erase size efficiently.
1814          */
1815         if (is_power_of_2(card->erase_size)) {
1816                 unsigned int temp = from_new;
1817 
1818                 from_new = round_up(temp, card->erase_size);
1819                 rem = from_new - temp;
1820 
1821                 if (nr_new > rem)
1822                         nr_new -= rem;
1823                 else
1824                         return 0;
1825 
1826                 nr_new = round_down(nr_new, card->erase_size);
1827         } else {
1828                 rem = from_new % card->erase_size;
1829                 if (rem) {
1830                         rem = card->erase_size - rem;
1831                         from_new += rem;
1832                         if (nr_new > rem)
1833                                 nr_new -= rem;
1834                         else
1835                                 return 0;
1836                 }
1837 
1838                 rem = nr_new % card->erase_size;
1839                 if (rem)
1840                         nr_new -= rem;
1841         }
1842 
1843         if (nr_new == 0)
1844                 return 0;
1845 
1846         *to = from_new + nr_new;
1847         *from = from_new;
1848 
1849         return nr_new;
1850 }
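
/*
 * Worked example (editorial note): with erase_size = 1024 sectors and a
 * request of *from = 1000, nr = 3000, the power-of-2 path rounds
 * from_new up to 1024 (rem = 24), trims nr_new to 2976 and rounds it
 * down to 2048, so the aligned range becomes [1024, 3072) and 2048 is
 * returned; the 24 leading and 928 trailing sectors stay untouched.
 */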
1851 
1852 /**
1853  * mmc_erase - erase sectors.
1854  * @card: card to erase
1855  * @from: first sector to erase
1856  * @nr: number of sectors to erase
1857  * @arg: erase command argument
1858  *
1859  * Caller must claim host before calling this function.
1860  */
1861 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
1862               unsigned int arg)
1863 {
1864         unsigned int rem, to = from + nr;
1865         int err;
1866 
1867         if (!(card->host->caps & MMC_CAP_ERASE) ||
1868             !(card->csd.cmdclass & CCC_ERASE))
1869                 return -EOPNOTSUPP;
1870 
1871         if (!card->erase_size)
1872                 return -EOPNOTSUPP;
1873 
1874         if (mmc_card_sd(card) && arg != SD_ERASE_ARG && arg != SD_DISCARD_ARG)
1875                 return -EOPNOTSUPP;
1876 
1877         if (mmc_card_mmc(card) && (arg & MMC_SECURE_ARGS) &&
1878             !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
1879                 return -EOPNOTSUPP;
1880 
1881         if (mmc_card_mmc(card) && (arg & MMC_TRIM_ARGS) &&
1882             !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
1883                 return -EOPNOTSUPP;
1884 
1885         if (arg == MMC_SECURE_ERASE_ARG) {
1886                 if (from % card->erase_size || nr % card->erase_size)
1887                         return -EINVAL;
1888         }
1889 
1890         if (arg == MMC_ERASE_ARG)
1891                 nr = mmc_align_erase_size(card, &from, &to, nr);
1892 
1893         if (nr == 0)
1894                 return 0;
1895 
1896         if (to <= from)
1897                 return -EINVAL;
1898 
1899         /* 'from' and 'to' are inclusive */
1900         to -= 1;
1901 
1902         /*
1903          * Special case where only one erase-group fits in the timeout budget:
1904          * If the region crosses an erase-group boundary in this particular
1905          * case, we will be trimming more than one erase-group, which does not
1906          * fit in the timeout budget of the controller, so we need to split it
1907          * and call mmc_do_erase() twice if necessary. This special case is
1908          * identified by the card->eg_boundary flag.
1909          */
1910         rem = card->erase_size - (from % card->erase_size);
1911         if ((arg & MMC_TRIM_ARGS) && card->eg_boundary && nr > rem) {
1912                 err = mmc_do_erase(card, from, from + rem - 1, arg);
1913                 from += rem;
1914                 if (err || to <= from)
1915                         return err;
1916         }
1917 
1918         return mmc_do_erase(card, from, to, arg);
1919 }
1920 EXPORT_SYMBOL(mmc_erase);
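
/*
 * Usage sketch (editorial, hypothetical wrapper): per the kernel-doc
 * above, the caller must claim the host around mmc_erase(), and should
 * check a capability helper such as mmc_can_trim() first.
 */
#if 0
static int example_trim_range(struct mmc_card *card, unsigned int from,
                              unsigned int nr)
{
        int err;

        if (!mmc_can_trim(card))
                return -EOPNOTSUPP;

        mmc_claim_host(card->host);
        err = mmc_erase(card, from, nr, MMC_TRIM_ARG);
        mmc_release_host(card->host);
        return err;
}
#endif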
1921 
1922 int mmc_can_erase(struct mmc_card *card)
1923 {
1924         if ((card->host->caps & MMC_CAP_ERASE) &&
1925             (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
1926                 return 1;
1927         return 0;
1928 }
1929 EXPORT_SYMBOL(mmc_can_erase);
1930 
1931 int mmc_can_trim(struct mmc_card *card)
1932 {
1933         if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
1934             (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
1935                 return 1;
1936         return 0;
1937 }
1938 EXPORT_SYMBOL(mmc_can_trim);
1939 
1940 int mmc_can_discard(struct mmc_card *card)
1941 {
1942         /*
1943          * As there's no way to detect the discard support bit at v4.5,
1944          * use the s/w feature support field.
1945          */
1946         if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
1947                 return 1;
1948         return 0;
1949 }
1950 EXPORT_SYMBOL(mmc_can_discard);
1951 
1952 int mmc_can_sanitize(struct mmc_card *card)
1953 {
1954         if (!mmc_can_trim(card) && !mmc_can_erase(card))
1955                 return 0;
1956         if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
1957                 return 1;
1958         return 0;
1959 }
1960 EXPORT_SYMBOL(mmc_can_sanitize);
1961 
1962 int mmc_can_secure_erase_trim(struct mmc_card *card)
1963 {
1964         if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
1965             !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
1966                 return 1;
1967         return 0;
1968 }
1969 EXPORT_SYMBOL(mmc_can_secure_erase_trim);
1970 
1971 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
1972                             unsigned int nr)
1973 {
1974         if (!card->erase_size)
1975                 return 0;
1976         if (from % card->erase_size || nr % card->erase_size)
1977                 return 0;
1978         return 1;
1979 }
1980 EXPORT_SYMBOL(mmc_erase_group_aligned);
1981 
1982 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
1983                                             unsigned int arg)
1984 {
1985         struct mmc_host *host = card->host;
1986         unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
1987         unsigned int last_timeout = 0;
1988         unsigned int max_busy_timeout = host->max_busy_timeout ?
1989                         host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
1990 
1991         if (card->erase_shift) {
1992                 max_qty = UINT_MAX >> card->erase_shift;
1993                 min_qty = card->pref_erase >> card->erase_shift;
1994         } else if (mmc_card_sd(card)) {
1995                 max_qty = UINT_MAX;
1996                 min_qty = card->pref_erase;
1997         } else {
1998                 max_qty = UINT_MAX / card->erase_size;
1999                 min_qty = card->pref_erase / card->erase_size;
2000         }
2001 
2002         /*
2003          * We should not use 'host->max_busy_timeout' as the only limit
2004          * when deciding the max discard sectors. We should pick a balanced
2005          * value that improves the erase speed without incurring an overly
2006          * long timeout at the same time.
2007          *
2008          * Here we treat 'card->pref_erase' as the minimal discard sectors
2009          * regardless of the size of 'host->max_busy_timeout', but if
2010          * 'host->max_busy_timeout' is large enough for more discard sectors,
2011          * then we can continue to increase the max discard sectors until we
2012          * reach a balanced value. In cases when 'host->max_busy_timeout'
2013          * isn't specified, use the default max erase timeout.
2014          */
2015         do {
2016                 y = 0;
2017                 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2018                         timeout = mmc_erase_timeout(card, arg, qty + x);
2019 
2020                         if (qty + x > min_qty && timeout > max_busy_timeout)
2021                                 break;
2022 
2023                         if (timeout < last_timeout)
2024                                 break;
2025                         last_timeout = timeout;
2026                         y = x;
2027                 }
2028                 qty += y;
2029         } while (y);
2030 
2031         if (!qty)
2032                 return 0;
2033 
2034         /*
2035          * When specifying a sector range to trim, chances are we might cross
2036          * an erase-group boundary even if the amount of sectors is less than
2037          * one erase-group.
2038          * If we can only fit one erase-group in the controller timeout budget,
2039          * we must take care that erase-group boundaries are not crossed by a
2040          * single trim operation. We flag that special case with "eg_boundary".
2041          * In all other cases we can just decrement qty and pretend that we
2042          * always touch (qty + 1) erase-groups as a simple optimization.
2043          */
2044         if (qty == 1)
2045                 card->eg_boundary = 1;
2046         else
2047                 qty--;
2048 
2049         /* Convert qty to sectors */
2050         if (card->erase_shift)
2051                 max_discard = qty << card->erase_shift;
2052         else if (mmc_card_sd(card))
2053                 max_discard = qty + 1;
2054         else
2055                 max_discard = qty * card->erase_size;
2056 
2057         return max_discard;
2058 }
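
/*
 * Editorial note: the loop above is effectively a doubling search for
 * the largest qty whose erase timeout still fits within
 * max_busy_timeout: each inner pass doubles x while the timeout keeps
 * fitting and keeps growing monotonically, the last good step y is
 * accumulated into qty, and the search restarts from a step of 1 until
 * no further step fits.
 */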
2059 
2060 unsigned int mmc_calc_max_discard(struct mmc_card *card)
2061 {
2062         struct mmc_host *host = card->host;
2063         unsigned int max_discard, max_trim;
2064 
2065         /*
2066          * Without erase_group_def set, the MMC erase timeout depends on the
2067          * clock frequency, which can change.  In that case, the best choice
2068          * is just the preferred erase size.
2069          */
2070         if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2071                 return card->pref_erase;
2072 
2073         max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2074         if (mmc_can_trim(card)) {
2075                 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2076                 if (max_trim < max_discard || max_discard == 0)
2077                         max_discard = max_trim;
2078         } else if (max_discard < card->erase_size) {
2079                 max_discard = 0;
2080         }
2081         pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2082                 mmc_hostname(host), max_discard, host->max_busy_timeout ?
2083                 host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
2084         return max_discard;
2085 }
2086 EXPORT_SYMBOL(mmc_calc_max_discard);
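
/*
 * Usage sketch (editorial, hypothetical helper; requires
 * <linux/blkdev.h>): the computed value is what the mmc block/queue
 * code would typically feed into the request queue's discard limit.
 */
#if 0
static void example_setup_discard(struct request_queue *q,
                                  struct mmc_card *card)
{
        unsigned int max_discard = mmc_calc_max_discard(card);

        if (max_discard)
                blk_queue_max_discard_sectors(q, max_discard);
}
#endif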
2087 
2088 bool mmc_card_is_blockaddr(struct mmc_card *card)
2089 {
2090         return card ? mmc_card_blockaddr(card) : false;
2091 }
2092 EXPORT_SYMBOL(mmc_card_is_blockaddr);
2093 
2094 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2095 {
2096         struct mmc_command cmd = {};
2097 
2098         if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
2099             mmc_card_hs400(card) || mmc_card_hs400es(card))
2100                 return 0;
2101 
2102         cmd.opcode = MMC_SET_BLOCKLEN;
2103         cmd.arg = blocklen;
2104         cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2105         return mmc_wait_for_cmd(card->host, &cmd, 5);
2106 }
2107 EXPORT_SYMBOL(mmc_set_blocklen);
2108 
2109 static void mmc_hw_reset_for_init(struct mmc_host *host)
2110 {
2111         mmc_pwrseq_reset(host);
2112 
2113         if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2114                 return;
2115         host->ops->hw_reset(host);
2116 }
2117 
2118 int mmc_hw_reset(struct mmc_host *host)
2119 {
2120         int ret;
2121 
2122         if (!host->card)
2123                 return -EINVAL;
2124 
2125         mmc_bus_get(host);
2126         if (!host->bus_ops || host->bus_dead || !host->bus_ops->hw_reset) {
2127                 mmc_bus_put(host);
2128                 return -EOPNOTSUPP;
2129         }
2130 
2131         ret = host->bus_ops->hw_reset(host);
2132         mmc_bus_put(host);
2133 
2134         if (ret < 0)
2135                 pr_warn("%s: tried to HW reset card, got error %d\n",
2136                         mmc_hostname(host), ret);
2137 
2138         return ret;
2139 }
2140 EXPORT_SYMBOL(mmc_hw_reset);
2141 
2142 int mmc_sw_reset(struct mmc_host *host)
2143 {
2144         int ret;
2145 
2146         if (!host->card)
2147                 return -EINVAL;
2148 
2149         mmc_bus_get(host);
2150         if (!host->bus_ops || host->bus_dead || !host->bus_ops->sw_reset) {
2151                 mmc_bus_put(host);
2152                 return -EOPNOTSUPP;
2153         }
2154 
2155         ret = host->bus_ops->sw_reset(host);
2156         mmc_bus_put(host);
2157 
2158         if (ret)
2159                 pr_warn("%s: tried to SW reset card, got error %d\n",
2160                         mmc_hostname(host), ret);
2161 
2162         return ret;
2163 }
2164 EXPORT_SYMBOL(mmc_sw_reset);
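
/*
 * Usage sketch (editorial, hypothetical caller): recovery paths can try
 * the cheaper software reset first and fall back to a full hardware
 * reset when the bus handler does not implement sw_reset.
 */
#if 0
static int example_reset_card(struct mmc_host *host)
{
        int err;

        err = mmc_sw_reset(host);
        if (err == -EOPNOTSUPP)
                err = mmc_hw_reset(host);
        return err;
}
#endif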
2165 
2166 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2167 {
2168         host->f_init = freq;
2169 
2170         pr_debug("%s: %s: trying to init card at %u Hz\n",
2171                 mmc_hostname(host), __func__, host->f_init);
2172 
2173         mmc_power_up(host, host->ocr_avail);
2174 
2175         /*
2176          * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2177          * do a hardware reset if possible.
2178          */
2179         mmc_hw_reset_for_init(host);
2180 
2181         /*
2182          * sdio_reset sends CMD52 to reset card.  Since we do not know
2183          * if the card is being re-initialized, just send it.  CMD52
2184          * should be ignored by SD/eMMC cards.
2185          * Skip it if we already know that we do not support SDIO commands
2186          */
2187         if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2188                 sdio_reset(host);
2189 
2190         mmc_go_idle(host);
2191 
2192         if (!(host->caps2 & MMC_CAP2_NO_SD))
2193                 mmc_send_if_cond(host, host->ocr_avail);
2194 
2195         /* Order's important: probe SDIO, then SD, then MMC */
2196         if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2197                 if (!mmc_attach_sdio(host))
2198                         return 0;
2199 
2200         if (!(host->caps2 & MMC_CAP2_NO_SD))
2201                 if (!mmc_attach_sd(host))
2202                         return 0;
2203 
2204         if (!(host->caps2 & MMC_CAP2_NO_MMC))
2205                 if (!mmc_attach_mmc(host))
2206                         return 0;
2207 
2208         mmc_power_off(host);
2209         return -EIO;
2210 }
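
/*
 * Editorial note: mmc_rescan() below walks the 'freqs' table defined
 * earlier in this file (conventionally 400/300/200/100 kHz) and retries
 * mmc_rescan_try_freq() at each rate until an attach handler succeeds.
 * Per the SD and SDIO specifications, initial card identification must
 * happen at 400 kHz or below, which is why probing starts there.
 */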
2211 
2212 int _mmc_detect_card_removed(struct mmc_host *host)
2213 {
2214         int ret;
2215 
2216         if (!host->card || mmc_card_removed(host->card))
2217                 return 1;
2218 
2219         ret = host->bus_ops->alive(host);
2220 
2221         /*
2222          * Card detect status and alive check may be out of sync if card is
2223          * removed slowly, when card detect switch changes while card/slot
2224          * pads are still contacted in hardware (refer to "SD Card Mechanical
2225          * Addendum, Appendix C: Card Detection Switch"). So reschedule the
2226          * detect work 200 ms later for this case.
2227          */
2228         if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2229                 mmc_detect_change(host, msecs_to_jiffies(200));
2230                 pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2231         }
2232 
2233         if (ret) {
2234                 mmc_card_set_removed(host->card);
2235                 pr_debug("%s: card remove detected\n", mmc_hostname(host));
2236         }
2237 
2238         return ret;
2239 }
2240 
2241 int mmc_detect_card_removed(struct mmc_host *host)
2242 {
2243         struct mmc_card *card = host->card;
2244         int ret;
2245 
2246         WARN_ON(!host->claimed);
2247 
2248         if (!card)
2249                 return 1;
2250 
2251         if (!mmc_card_is_removable(host))
2252                 return 0;
2253 
2254         ret = mmc_card_removed(card);
2255         /*
2256          * The card will be considered unchanged unless we have been asked to
2257          * detect a change or host requires polling to provide card detection.
2258          */
2259         if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2260                 return ret;
2261 
2262         host->detect_change = 0;
2263         if (!ret) {
2264                 ret = _mmc_detect_card_removed(host);
2265                 if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2266                         /*
2267                          * Schedule the detect work as soon as possible to let a
2268                          * rescan handle the card removal.
2269                          */
2270                         cancel_delayed_work(&host->detect);
2271                         _mmc_detect_change(host, 0, false);
2272                 }
2273         }
2274 
2275         return ret;
2276 }
2277 EXPORT_SYMBOL(mmc_detect_card_removed);
2278 
2279 void mmc_rescan(struct work_struct *work)
2280 {
2281         struct mmc_host *host =
2282                 container_of(work, struct mmc_host, detect.work);
2283         int i;
2284 
2285         if (host->rescan_disable)
2286                 return;
2287 
2288         /* If there is a non-removable card registered, only scan once */
2289         if (!mmc_card_is_removable(host) && host->rescan_entered)
2290                 return;
2291         host->rescan_entered = 1;
2292 
2293         if (host->trigger_card_event && host->ops->card_event) {
2294                 mmc_claim_host(host);
2295                 host->ops->card_event(host);
2296                 mmc_release_host(host);
2297                 host->trigger_card_event = false;
2298         }
2299 
2300         mmc_bus_get(host);
2301 
2302         /* Verify that a registered card is still functional, else remove it. */
2303         if (host->bus_ops && !host->bus_dead)
2304                 host->bus_ops->detect(host);
2305 
2306         host->detect_change = 0;
2307 
2308         /*
2309          * Let mmc_bus_put() free the bus/bus_ops if we've found that
2310          * the card is no longer present.
2311          */
2312         mmc_bus_put(host);
2313         mmc_bus_get(host);
2314 
2315         /* if there still is a card present, stop here */
2316         if (host->bus_ops) {
2317                 mmc_bus_put(host);
2318                 goto out;
2319         }
2320 
2321         /*
2322          * Only we can add a new handler, so it's safe to
2323          * release the lock here.
2324          */
2325         mmc_bus_put(host);
2326 
2327         mmc_claim_host(host);
2328         if (mmc_card_is_removable(host) && host->ops->get_cd &&
2329                         host->ops->get_cd(host) == 0) {
2330                 mmc_power_off(host);
2331                 mmc_release_host(host);
2332                 goto out;
2333         }
2334 
2335         for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2336                 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2337                         break;
2338                 if (freqs[i] <= host->f_min)
2339                         break;
2340         }
2341         mmc_release_host(host);
2342 
2343  out:
2344         if (host->caps & MMC_CAP_NEEDS_POLL)
2345                 mmc_schedule_delayed_work(&host->detect, HZ);
2346 }
2347 
2348 void mmc_start_host(struct mmc_host *host)
2349 {
2350         host->f_init = max(freqs[0], host->f_min);
2351         host->rescan_disable = 0;
2352         host->ios.power_mode = MMC_POWER_UNDEFINED;
2353 
2354         if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
2355                 mmc_claim_host(host);
2356                 mmc_power_up(host, host->ocr_avail);
2357                 mmc_release_host(host);
2358         }
2359 
2360         mmc_gpiod_request_cd_irq(host);
2361         _mmc_detect_change(host, 0, false);
2362 }
2363 
2364 void mmc_stop_host(struct mmc_host *host)
2365 {
2366         if (host->slot.cd_irq >= 0) {
2367                 mmc_gpio_set_cd_wake(host, false);
2368                 disable_irq(host->slot.cd_irq);
2369         }
2370 
2371         host->rescan_disable = 1;
2372         cancel_delayed_work_sync(&host->detect);
2373 
2374         /* clear pm flags now and let card drivers set them as needed */
2375         host->pm_flags = 0;
2376 
2377         mmc_bus_get(host);
2378         if (host->bus_ops && !host->bus_dead) {
2379                 /* Calling bus_ops->remove() with a claimed host can deadlock */
2380                 host->bus_ops->remove(host);
2381                 mmc_claim_host(host);
2382                 mmc_detach_bus(host);
2383                 mmc_power_off(host);
2384                 mmc_release_host(host);
2385                 mmc_bus_put(host);
2386                 return;
2387         }
2388         mmc_bus_put(host);
2389 
2390         mmc_claim_host(host);
2391         mmc_power_off(host);
2392         mmc_release_host(host);
2393 }
2394 
2395 #ifdef CONFIG_PM_SLEEP
2396 /* Do the card removal on suspend if the card is assumed removable.
2397  * Do that in the PM notifier while userspace isn't yet frozen, so we
2398  * will still be able to sync the card.
2399  */
2400 static int mmc_pm_notify(struct notifier_block *notify_block,
2401                         unsigned long mode, void *unused)
2402 {
2403         struct mmc_host *host = container_of(
2404                 notify_block, struct mmc_host, pm_notify);
2405         unsigned long flags;
2406         int err = 0;
2407 
2408         switch (mode) {
2409         case PM_HIBERNATION_PREPARE:
2410         case PM_SUSPEND_PREPARE:
2411         case PM_RESTORE_PREPARE:
2412                 spin_lock_irqsave(&host->lock, flags);
2413                 host->rescan_disable = 1;
2414                 spin_unlock_irqrestore(&host->lock, flags);
2415                 cancel_delayed_work_sync(&host->detect);
2416 
2417                 if (!host->bus_ops)
2418                         break;
2419 
2420                 /* Validate prerequisites for suspend */
2421                 if (host->bus_ops->pre_suspend)
2422                         err = host->bus_ops->pre_suspend(host);
2423                 if (!err)
2424                         break;
2425 
2426                 if (!mmc_card_is_removable(host)) {
2427                         dev_warn(mmc_dev(host),
2428                                  "pre_suspend failed for non-removable host: %d\n",
2429                                  err);
2430                         /* Avoid removing non-removable hosts */
2431                         break;
2432                 }
2433 
2434                 /* Calling bus_ops->remove() with a claimed host can deadlock */
2435                 host->bus_ops->remove(host);
2436                 mmc_claim_host(host);
2437                 mmc_detach_bus(host);
2438                 mmc_power_off(host);
2439                 mmc_release_host(host);
2440                 host->pm_flags = 0;
2441                 break;
2442 
2443         case PM_POST_SUSPEND:
2444         case PM_POST_HIBERNATION:
2445         case PM_POST_RESTORE:
2446 
2447                 spin_lock_irqsave(&host->lock, flags);
2448                 host->rescan_disable = 0;
2449                 spin_unlock_irqrestore(&host->lock, flags);
2450                 _mmc_detect_change(host, 0, false);
2451                 break;
2452         }
2453 
2454         return 0;
2455 }
2456 
2457 void mmc_register_pm_notifier(struct mmc_host *host)
2458 {
2459         host->pm_notify.notifier_call = mmc_pm_notify;
2460         register_pm_notifier(&host->pm_notify);
2461 }
2462 
2463 void mmc_unregister_pm_notifier(struct mmc_host *host)
2464 {
2465         unregister_pm_notifier(&host->pm_notify);
2466 }
2467 #endif
2468 
2469 static int __init mmc_init(void)
2470 {
2471         int ret;
2472 
2473         ret = mmc_register_bus();
2474         if (ret)
2475                 return ret;
2476 
2477         ret = mmc_register_host_class();
2478         if (ret)
2479                 goto unregister_bus;
2480 
2481         ret = sdio_register_bus();
2482         if (ret)
2483                 goto unregister_host_class;
2484 
2485         return 0;
2486 
2487 unregister_host_class:
2488         mmc_unregister_host_class();
2489 unregister_bus:
2490         mmc_unregister_bus();
2491         return ret;
2492 }
2493 
2494 static void __exit mmc_exit(void)
2495 {
2496         sdio_unregister_bus();
2497         mmc_unregister_host_class();
2498         mmc_unregister_bus();
2499 }
2500 
2501 subsys_initcall(mmc_init);
2502 module_exit(mmc_exit);
2503 
2504 MODULE_LICENSE("GPL");
