root/drivers/mmc/core/mmc_ops.c


DEFINITIONS

This source file includes the following definitions:
  1. __mmc_send_status
  2. mmc_send_status
  3. _mmc_select_card
  4. mmc_select_card
  5. mmc_deselect_cards
  6. mmc_set_dsr
  7. mmc_go_idle
  8. mmc_send_op_cond
  9. mmc_set_relative_addr
  10. mmc_send_cxd_native
  11. mmc_send_cxd_data
  12. mmc_spi_send_csd
  13. mmc_send_csd
  14. mmc_spi_send_cid
  15. mmc_send_cid
  16. mmc_get_ext_csd
  17. mmc_spi_read_ocr
  18. mmc_spi_set_crc
  19. mmc_switch_status_error
  20. __mmc_switch_status
  21. mmc_switch_status
  22. mmc_poll_for_busy
  23. __mmc_switch
  24. mmc_switch
  25. mmc_send_tuning
  26. mmc_abort_tuning
  27. mmc_send_bus_test
  28. mmc_bus_test
  29. mmc_send_hpi_cmd
  30. mmc_interrupt_hpi
  31. mmc_can_ext_csd
  32. mmc_read_bkops_status
  33. mmc_run_bkops
  34. mmc_flush_cache
  35. mmc_cmdq_switch
  36. mmc_cmdq_enable
  37. mmc_cmdq_disable

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS      (10 * 60 * 1000) /* 10 minute timeout */

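/*
 * Editorial note (per the SD/eMMC specifications): these are the fixed
 * tuning block patterns a card returns in response to the tuning commands
 * (CMD19 on SD, CMD21 on eMMC HS200), for 4-bit and 8-bit bus widths
 * respectively; mmc_send_tuning() below compares the received block
 * against them.
 */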
static const u8 tuning_blk_pattern_4bit[] = {
        0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
        0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
        0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
        0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
        0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
        0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
        0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
        0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
        0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
        0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
        0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
        0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
        0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
        0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
        0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
        0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
        0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
        0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
        0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
        0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
        0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
        0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
        0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
        0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
        int err;
        struct mmc_command cmd = {};

        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, retries);
        if (err)
                return err;

        /* NOTE: callers are required to understand the difference
         * between "native" and SPI format status words!
         */
        if (status)
                *status = cmd.resp[0];

        return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
        return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);
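/*
 * Illustrative usage sketch (not part of this file): a typical caller
 * issues CMD13 via mmc_send_status() and decodes the R1 status word with
 * the R1_* helpers, e.g. to check that the card has returned to the
 * transfer state:
 *
 *      u32 status;
 *      int err = mmc_send_status(card, &status);
 *
 *      if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 *              ready = true;
 */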

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
        struct mmc_command cmd = {};

        cmd.opcode = MMC_SELECT_CARD;

        if (card) {
                cmd.arg = card->rca << 16;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
        } else {
                cmd.arg = 0;
                cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
        }

        return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
        return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
        return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16-bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
        struct mmc_command cmd = {};

        cmd.opcode = MMC_SET_DSR;

        cmd.arg = (host->dsr << 16) | 0xffff;
        cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

        return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
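/*
 * Illustrative note (not part of this file): the DSR value occupies the
 * upper 16 bits of the CMD4 argument, with the lower 16 bits stuffed with
 * ones. If the platform supplies a DSR of 0x0404 (typically via a "dsr"
 * device-tree property), the argument sent is 0x0404ffff.
 */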

int mmc_go_idle(struct mmc_host *host)
{
        int err;
        struct mmc_command cmd = {};

        /*
         * Non-SPI hosts need to prevent chipselect going active during
         * GO_IDLE; that would put chips into SPI mode.  Remind them of
         * that in case of hardware that won't pull up DAT3/nCS otherwise.
         *
         * SPI hosts ignore ios.chip_select; it's managed according to
         * rules that must accommodate non-MMC slaves which this layer
         * won't even know about.
         */
        if (!mmc_host_is_spi(host)) {
                mmc_set_chip_select(host, MMC_CS_HIGH);
                mmc_delay(1);
        }

        cmd.opcode = MMC_GO_IDLE_STATE;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

        err = mmc_wait_for_cmd(host, &cmd, 0);

        mmc_delay(1);

        if (!mmc_host_is_spi(host)) {
                mmc_set_chip_select(host, MMC_CS_DONTCARE);
                mmc_delay(1);
        }

        host->use_spi_crc = 0;

        return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
        struct mmc_command cmd = {};
        int i, err = 0;

        cmd.opcode = MMC_SEND_OP_COND;
        cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

        for (i = 100; i; i--) {
                err = mmc_wait_for_cmd(host, &cmd, 0);
                if (err)
                        break;

                /* wait until reset completes */
                if (mmc_host_is_spi(host)) {
                        if (!(cmd.resp[0] & R1_SPI_IDLE))
                                break;
                } else {
                        if (cmd.resp[0] & MMC_CARD_BUSY)
                                break;
                }

                err = -ETIMEDOUT;

                mmc_delay(10);

                /*
                 * According to eMMC specification v5.1 section 6.4.3, we
                 * should issue CMD1 repeatedly in the idle state until
                 * the eMMC is ready. Otherwise some eMMC devices seem to enter
                 * the inactive mode after mmc_init_card() issued CMD0 when
                 * the eMMC device is busy.
                 */
                if (!ocr && !mmc_host_is_spi(host))
                        cmd.arg = cmd.resp[0] | BIT(30);
        }

        if (rocr && !mmc_host_is_spi(host))
                *rocr = cmd.resp[0];

        return err;
}
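/*
 * Illustrative usage sketch (not part of this file): during MMC attach
 * the core typically probes the OCR with a zero argument first, masks it
 * against the voltages the host supports, and retries with the resulting
 * window, setting bit 30 to request sector addressing on high-capacity
 * devices:
 *
 *      u32 ocr, rocr;
 *
 *      err = mmc_send_op_cond(host, 0, &ocr);
 *      ocr &= host->ocr_avail;
 *      err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
 */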

int mmc_set_relative_addr(struct mmc_card *card)
{
        struct mmc_command cmd = {};

        cmd.opcode = MMC_SET_RELATIVE_ADDR;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
        int err;
        struct mmc_command cmd = {};

        cmd.opcode = opcode;
        cmd.arg = arg;
        cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        memcpy(cxd, cmd.resp, sizeof(u32) * 4);

        return 0;
}

/*
 * NOTE: the caller is required to pass a DMA-capable buffer for buf, or
 * an on-stack buffer (at the cost of some overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
                u32 opcode, void *buf, unsigned len)
{
        struct mmc_request mrq = {};
        struct mmc_command cmd = {};
        struct mmc_data data = {};
        struct scatterlist sg;

        mrq.cmd = &cmd;
        mrq.data = &data;

        cmd.opcode = opcode;
        cmd.arg = 0;

        /* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
         * rely on callers to never use this with "native" calls for reading
         * CSD or CID.  Native versions of those commands use the R2 type,
         * not R1 plus a data block.
         */
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = len;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        sg_init_one(&sg, buf, len);

        if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
                /*
                 * The spec states that CSD and CID accesses have a timeout
                 * of 64 clock cycles.
                 */
                data.timeout_ns = 0;
                data.timeout_clks = 64;
        } else
                mmc_set_data_timeout(&data, card);

        mmc_wait_for_req(host, &mrq);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return 0;
}

static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
{
        int ret, i;
        __be32 *csd_tmp;

        csd_tmp = kzalloc(16, GFP_KERNEL);
        if (!csd_tmp)
                return -ENOMEM;

        ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
        if (ret)
                goto err;

        for (i = 0; i < 4; i++)
                csd[i] = be32_to_cpu(csd_tmp[i]);

err:
        kfree(csd_tmp);
        return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
        if (mmc_host_is_spi(card->host))
                return mmc_spi_send_csd(card, csd);

        return mmc_send_cxd_native(card->host, card->rca << 16, csd,
                                MMC_SEND_CSD);
}

static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
{
        int ret, i;
        __be32 *cid_tmp;

        cid_tmp = kzalloc(16, GFP_KERNEL);
        if (!cid_tmp)
                return -ENOMEM;

        ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
        if (ret)
                goto err;

        for (i = 0; i < 4; i++)
                cid[i] = be32_to_cpu(cid_tmp[i]);

err:
        kfree(cid_tmp);
        return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
        if (mmc_host_is_spi(host))
                return mmc_spi_send_cid(host, cid);

        return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
        int err;
        u8 *ext_csd;

        if (!card || !new_ext_csd)
                return -EINVAL;

        if (!mmc_can_ext_csd(card))
                return -EOPNOTSUPP;

        /*
         * As the ext_csd is so large and mostly unused, we don't store the
         * raw block in mmc_card.
         */
        ext_csd = kzalloc(512, GFP_KERNEL);
        if (!ext_csd)
                return -ENOMEM;

        err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
                                512);
        if (err)
                kfree(ext_csd);
        else
                *new_ext_csd = ext_csd;

        return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
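/*
 * Illustrative usage sketch (not part of this file): callers own the
 * buffer returned through @new_ext_csd and must free it themselves:
 *
 *      u8 *ext_csd;
 *
 *      err = mmc_get_ext_csd(card, &ext_csd);
 *      if (!err) {
 *              u8 rev = ext_csd[EXT_CSD_REV];
 *              kfree(ext_csd);
 *      }
 */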

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
        struct mmc_command cmd = {};
        int err;

        cmd.opcode = MMC_SPI_READ_OCR;
        cmd.arg = highcap ? (1 << 30) : 0;
        cmd.flags = MMC_RSP_SPI_R3;

        err = mmc_wait_for_cmd(host, &cmd, 0);

        *ocrp = cmd.resp[1];
        return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
        struct mmc_command cmd = {};
        int err;

        cmd.opcode = MMC_SPI_CRC_ON_OFF;
        cmd.flags = MMC_RSP_SPI_R1;
        cmd.arg = use_crc;

        err = mmc_wait_for_cmd(host, &cmd, 0);
        if (!err)
                host->use_spi_crc = use_crc;
        return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
        if (mmc_host_is_spi(host)) {
                if (status & R1_SPI_ILLEGAL_COMMAND)
                        return -EBADMSG;
        } else {
                if (R1_STATUS(status))
                        pr_warn("%s: unexpected status %#x after switch\n",
                                mmc_hostname(host), status);
                if (status & R1_SWITCH_ERROR)
                        return -EBADMSG;
        }
        return 0;
}

/* Caller must hold re-tuning */
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
        u32 status;
        int err;

        err = mmc_send_status(card, &status);
        if (!crc_err_fatal && err == -EILSEQ)
                return 0;
        if (err)
                return err;

        return mmc_switch_status_error(card->host, status);
}

int mmc_switch_status(struct mmc_card *card)
{
        return __mmc_switch_status(card, true);
}

static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
                        bool send_status, bool retry_crc_err)
{
        struct mmc_host *host = card->host;
        int err;
        unsigned long timeout;
        u32 status = 0;
        bool expired = false;
        bool busy = false;

        /* We have an unspecified cmd timeout, use the fallback value. */
        if (!timeout_ms)
                timeout_ms = MMC_OPS_TIMEOUT_MS;

        /*
         * If we aren't allowed to poll using CMD13 and the host isn't
         * capable of polling via ->card_busy(), rely on waiting for the
         * stated timeout to be sufficient.
         */
        if (!send_status && !host->ops->card_busy) {
                mmc_delay(timeout_ms);
                return 0;
        }

        timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
        do {
                /*
                 * Due to the possibility of being preempted while polling,
                 * check the expiration time first.
                 */
                expired = time_after(jiffies, timeout);

                if (host->ops->card_busy) {
                        busy = host->ops->card_busy(host);
                } else {
                        err = mmc_send_status(card, &status);
                        if (retry_crc_err && err == -EILSEQ) {
                                busy = true;
                        } else if (err) {
                                return err;
                        } else {
                                err = mmc_switch_status_error(host, status);
                                if (err)
                                        return err;
                                busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
                        }
                }

                /* Timeout if the device still remains busy. */
                if (expired && busy) {
                        pr_err("%s: Card stuck being busy! %s\n",
                                mmc_hostname(host), __func__);
                        return -ETIMEDOUT;
                }
        } while (busy);

        return 0;
}

/**
 *      __mmc_switch - modify EXT_CSD register
 *      @card: the MMC card associated with the data transfer
 *      @set: cmd set values
 *      @index: EXT_CSD register index
 *      @value: value to program into EXT_CSD register
 *      @timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *      @timing: new timing to change to
 *      @use_busy_signal: use the busy signal as response type
 *      @send_status: send status cmd to poll for busy
 *      @retry_crc_err: retry on CRC errors when polling with CMD13 for busy
 *
 *      Modifies the EXT_CSD register for the selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
                unsigned int timeout_ms, unsigned char timing,
                bool use_busy_signal, bool send_status, bool retry_crc_err)
{
        struct mmc_host *host = card->host;
        int err;
        struct mmc_command cmd = {};
        bool use_r1b_resp = use_busy_signal;
        unsigned char old_timing = host->ios.timing;

        mmc_retune_hold(host);

        /*
         * If the cmd timeout and the max_busy_timeout of the host are both
         * specified, let's validate them. A failure means we need to prevent
         * the host from doing hw busy detection, which is done by converting
         * to an R1 response instead of an R1B. Note that some hosts require
         * R1B, which also means they are on their own when it comes to
         * dealing with the busy timeout.
         */
        if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && timeout_ms &&
            host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
                use_r1b_resp = false;

        cmd.opcode = MMC_SWITCH;
        cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
                  (index << 16) |
                  (value << 8) |
                  set;
        cmd.flags = MMC_CMD_AC;
        if (use_r1b_resp) {
                cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
                /*
                 * A busy_timeout of zero means the host can decide to use
                 * whatever value it finds suitable.
                 */
                cmd.busy_timeout = timeout_ms;
        } else {
                cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
        }

        if (index == EXT_CSD_SANITIZE_START)
                cmd.sanitize_busy = true;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                goto out;

        /* No need to check card status in case of unblocking command */
        if (!use_busy_signal)
                goto out;

        /* If SPI or HW busy detection was used above, we don't need to poll. */
        if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
                mmc_host_is_spi(host))
                goto out_tim;

        /* Let's try to poll to find out when the command is completed. */
        err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
        if (err)
                goto out;

out_tim:
        /* Switch to the new timing before checking the switch status. */
        if (timing)
                mmc_set_timing(host, timing);

        if (send_status) {
                err = mmc_switch_status(card);
                if (err && timing)
                        mmc_set_timing(host, old_timing);
        }
out:
        mmc_retune_release(host);

        return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
                unsigned int timeout_ms)
{
        return __mmc_switch(card, set, index, value, timeout_ms, 0,
                        true, true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
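/*
 * Illustrative usage sketch (not part of this file): CMD6 packs its
 * argument as (access << 24) | (index << 16) | (value << 8) | set, as
 * built above. Switching a card to high-speed timing, for example, is
 * done elsewhere in the core roughly like this:
 *
 *      err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
 *                       EXT_CSD_TIMING_HS, card->ext_csd.generic_cmd6_time);
 */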

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
        struct mmc_request mrq = {};
        struct mmc_command cmd = {};
        struct mmc_data data = {};
        struct scatterlist sg;
        struct mmc_ios *ios = &host->ios;
        const u8 *tuning_block_pattern;
        int size, err = 0;
        u8 *data_buf;

        if (ios->bus_width == MMC_BUS_WIDTH_8) {
                tuning_block_pattern = tuning_blk_pattern_8bit;
                size = sizeof(tuning_blk_pattern_8bit);
        } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
                tuning_block_pattern = tuning_blk_pattern_4bit;
                size = sizeof(tuning_blk_pattern_4bit);
        } else
                return -EINVAL;

        data_buf = kzalloc(size, GFP_KERNEL);
        if (!data_buf)
                return -ENOMEM;

        mrq.cmd = &cmd;
        mrq.data = &data;

        cmd.opcode = opcode;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = size;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;

        /*
         * According to the tuning specs, the tuning process normally takes
         * fewer than 40 executions of CMD19, and the timeout value should
         * be shorter than 150 ms.
         */
        data.timeout_ns = 150 * NSEC_PER_MSEC;

        data.sg = &sg;
        data.sg_len = 1;
        sg_init_one(&sg, data_buf, size);

        mmc_wait_for_req(host, &mrq);

        if (cmd_error)
                *cmd_error = cmd.error;

        if (cmd.error) {
                err = cmd.error;
                goto out;
        }

        if (data.error) {
                err = data.error;
                goto out;
        }

        if (memcmp(data_buf, tuning_block_pattern, size))
                err = -EIO;

out:
        kfree(data_buf);
        return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
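/*
 * Illustrative sketch (hypothetical host-driver code, not part of this
 * file): a host's ->execute_tuning() callback typically sweeps its
 * sample-phase settings, probes each one with mmc_send_tuning(), and
 * then picks a phase in the middle of the largest passing window.
 * MY_NUM_PHASES, my_set_sample_phase() and passed[] are assumed names
 * for the controller-specific pieces:
 *
 *      for (phase = 0; phase < MY_NUM_PHASES; phase++) {
 *              my_set_sample_phase(host, phase);
 *              if (!mmc_send_tuning(host->mmc, opcode, NULL))
 *                      passed[phase] = true;
 *      }
 */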

int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
        struct mmc_command cmd = {};

        /*
         * The eMMC specification states that CMD12 can be used to stop a
         * tuning command, but the SD specification does not, so do nothing
         * unless it is eMMC.
         */
        if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
                return 0;

        cmd.opcode = MMC_STOP_TRANSMISSION;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

        /*
         * For drivers that override R1 to R1b, set an arbitrary timeout based
         * on the tuning timeout, i.e. 150 ms.
         */
        cmd.busy_timeout = 150;

        return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
                  u8 len)
{
        struct mmc_request mrq = {};
        struct mmc_command cmd = {};
        struct mmc_data data = {};
        struct scatterlist sg;
        u8 *data_buf;
        u8 *test_buf;
        int i, err;
        static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
        static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

        /*
         * DMA onto the stack is unsafe/nonportable, and callers to this
         * routine would normally provide temporary on-stack buffers, so
         * use a kmalloc'd bounce buffer instead.
         */
        data_buf = kmalloc(len, GFP_KERNEL);
        if (!data_buf)
                return -ENOMEM;

        if (len == 8)
                test_buf = testdata_8bit;
        else if (len == 4)
                test_buf = testdata_4bit;
        else {
                pr_err("%s: Invalid bus_width %d\n",
                       mmc_hostname(host), len);
                kfree(data_buf);
                return -EINVAL;
        }

        if (opcode == MMC_BUS_TEST_W)
                memcpy(data_buf, test_buf, len);

        mrq.cmd = &cmd;
        mrq.data = &data;
        cmd.opcode = opcode;
        cmd.arg = 0;

        /* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
         * rely on callers to never use this with "native" calls for reading
         * CSD or CID.  Native versions of those commands use the R2 type,
         * not R1 plus a data block.
         */
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = len;
        data.blocks = 1;
        if (opcode == MMC_BUS_TEST_R)
                data.flags = MMC_DATA_READ;
        else
                data.flags = MMC_DATA_WRITE;

        data.sg = &sg;
        data.sg_len = 1;
        mmc_set_data_timeout(&data, card);
        sg_init_one(&sg, data_buf, len);
        mmc_wait_for_req(host, &mrq);
        err = 0;
        if (opcode == MMC_BUS_TEST_R) {
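                /*
                 * The card sends back the inverse of the pattern it
                 * received, so each returned byte XORed with the
                 * corresponding test byte is expected to give 0xff.
                 */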
                for (i = 0; i < len / 4; i++)
                        if ((test_buf[i] ^ data_buf[i]) != 0xff) {
                                err = -EIO;
                                break;
                        }
        }
        kfree(data_buf);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
        int width;

        if (bus_width == MMC_BUS_WIDTH_8)
                width = 8;
        else if (bus_width == MMC_BUS_WIDTH_4)
                width = 4;
        else if (bus_width == MMC_BUS_WIDTH_1)
                return 0; /* no need for test */
        else
                return -EINVAL;

        /*
         * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
         * is a problem.  This improves chances that the test will work.
         */
        mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
        return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
        struct mmc_command cmd = {};
        unsigned int opcode;
        int err;

        opcode = card->ext_csd.hpi_cmd;
        if (opcode == MMC_STOP_TRANSMISSION)
                cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
        else if (opcode == MMC_SEND_STATUS)
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        cmd.opcode = opcode;
        cmd.arg = card->rca << 16 | 1;

        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err) {
                pr_warn("%s: error %d interrupting operation. "
                        "HPI command response %#x\n", mmc_hostname(card->host),
                        err, cmd.resp[0]);
                return err;
        }
        if (status)
                *status = cmd.resp[0];

        return 0;
}

/**
 *      mmc_interrupt_hpi - issue a High Priority Interrupt
 *      @card: the MMC card associated with the HPI transfer
 *
 *      Issue a High Priority Interrupt, then poll the card status until it
 *      leaves the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
        int err;
        u32 status;
        unsigned long prg_wait;

        if (!card->ext_csd.hpi_en) {
                pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
                return 1;
        }

        err = mmc_send_status(card, &status);
        if (err) {
                pr_err("%s: Failed to get card status\n",
                        mmc_hostname(card->host));
                goto out;
        }

        switch (R1_CURRENT_STATE(status)) {
        case R1_STATE_IDLE:
        case R1_STATE_READY:
        case R1_STATE_STBY:
        case R1_STATE_TRAN:
                /*
                 * In idle and transfer states, HPI is not needed and the caller
                 * can issue the next intended command immediately.
                 */
                goto out;
        case R1_STATE_PRG:
                break;
        default:
                /* In all other states, it's illegal to issue HPI */
                pr_debug("%s: HPI cannot be sent. Card state=%d\n",
                        mmc_hostname(card->host), R1_CURRENT_STATE(status));
                err = -EINVAL;
                goto out;
        }

        err = mmc_send_hpi_cmd(card, &status);
        if (err)
                goto out;

        prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
        do {
                err = mmc_send_status(card, &status);

                if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
                        break;
                if (time_after(jiffies, prg_wait))
                        err = -ETIMEDOUT;
        } while (!err);

out:
        return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
        return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
        int err;
        u8 *ext_csd;

        err = mmc_get_ext_csd(card, &ext_csd);
        if (err)
                return err;

        card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
        card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
        kfree(ext_csd);
        return 0;
}

/**
 *      mmc_run_bkops - Run BKOPS for supported cards
 *      @card: MMC card to run BKOPS for
 *
 *      Run background operations synchronously for cards that have manual
 *      BKOPS enabled, in case the card reports an urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
        int err;

        if (!card->ext_csd.man_bkops_en)
                return;

        err = mmc_read_bkops_status(card);
        if (err) {
                pr_err("%s: Failed to read bkops status: %d\n",
                       mmc_hostname(card->host), err);
                return;
        }

        if (!card->ext_csd.raw_bkops_status ||
            card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
                return;

        mmc_retune_hold(card->host);

        /*
         * For urgent BKOPS status, LEVEL_2 and higher, let's execute
         * synchronously. In the future, we may consider starting BKOPS for
         * less urgent levels using an asynchronous background task, when
         * idle.
         */
        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                        EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS);
        if (err)
                pr_warn("%s: Error %d starting bkops\n",
                        mmc_hostname(card->host), err);

        mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
        int err = 0;

        if (mmc_card_mmc(card) &&
                        (card->ext_csd.cache_size > 0) &&
                        (card->ext_csd.cache_ctrl & 1)) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                EXT_CSD_FLUSH_CACHE, 1, 0);
                if (err)
                        pr_err("%s: cache flush error %d\n",
                                        mmc_hostname(card->host), err);
        }

        return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
        u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
        int err;

        if (!card->ext_csd.cmdq_support)
                return -EOPNOTSUPP;

        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
                         val, card->ext_csd.generic_cmd6_time);
        if (!err)
                card->ext_csd.cmdq_en = enable;

        return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
        return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
        return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
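/*
 * Illustrative usage sketch (not part of this file): callers that must
 * issue a command which is not allowed while command queueing is enabled
 * typically bracket it like this, restoring CMDQ afterwards:
 *
 *      bool cmdq_was_en = card->ext_csd.cmdq_en;
 *
 *      if (cmdq_was_en)
 *              err = mmc_cmdq_disable(card);
 *      ... issue the non-queueable command ...
 *      if (!err && cmdq_was_en)
 *              err = mmc_cmdq_enable(card);
 */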
