drivers/mmc/host/android-goldfish.c


DEFINITIONS

This source file includes the following definitions:
  1. goldfish_mmc_cover_is_open
  2. goldfish_mmc_show_cover_switch
  3. goldfish_mmc_start_command
  4. goldfish_mmc_xfer_done
  5. goldfish_mmc_end_of_data
  6. goldfish_mmc_cmd_done
  7. goldfish_mmc_irq
  8. goldfish_mmc_prepare_data
  9. goldfish_mmc_request
  10. goldfish_mmc_set_ios
  11. goldfish_mmc_get_ro
  12. goldfish_mmc_probe
  13. goldfish_mmc_remove

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright 2007, Google Inc.
 *  Copyright 2012, Intel Inc.
 *
 *  based on omap.c driver, which was
 *  Copyright (C) 2004 Nokia Corporation
 *  Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
 *  Misc hacks here and there by Tony Lindgren <tony@atomide.com>
 *  Other hacks (DMA, SD, etc) by David Brownell
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/major.h>

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/clk.h>

#include <asm/io.h>
#include <asm/irq.h>

#include <asm/types.h>
#include <linux/uaccess.h>

#define DRIVER_NAME "goldfish_mmc"

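/*
 * Size of the single DMA-coherent bounce buffer shared with the emulated
 * controller.  Every data transfer is staged through this buffer, so it
 * also bounds max_req_size below.
 */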
#define BUFFER_SIZE   16384

#define GOLDFISH_MMC_READ(host, addr)   (readl(host->reg_base + addr))
#define GOLDFISH_MMC_WRITE(host, addr, x)   (writel(x, host->reg_base + addr))

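/* Goldfish MMC register offsets and bit definitions */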
enum {
        /* status register */
        MMC_INT_STATUS          = 0x00,
        /* set this to enable IRQ */
        MMC_INT_ENABLE          = 0x04,
        /* set this to specify buffer address */
        MMC_SET_BUFFER          = 0x08,

        /* MMC command number */
        MMC_CMD                 = 0x0C,

        /* MMC argument */
        MMC_ARG                 = 0x10,

        /* MMC response (or R2 bits 0 - 31) */
        MMC_RESP_0                      = 0x14,

        /* MMC R2 response bits 32 - 63 */
        MMC_RESP_1                      = 0x18,

        /* MMC R2 response bits 64 - 95 */
        MMC_RESP_2                      = 0x1C,

        /* MMC R2 response bits 96 - 127 */
        MMC_RESP_3                      = 0x20,

        MMC_BLOCK_LENGTH        = 0x24,
        MMC_BLOCK_COUNT         = 0x28,

        /* MMC state flags */
        MMC_STATE               = 0x2C,

        /* MMC_INT_STATUS bits */

        MMC_STAT_END_OF_CMD     = 1U << 0,
        MMC_STAT_END_OF_DATA    = 1U << 1,
        MMC_STAT_STATE_CHANGE   = 1U << 2,
        MMC_STAT_CMD_TIMEOUT    = 1U << 3,

        /* MMC_STATE bits */
        MMC_STATE_INSERTED     = 1U << 0,
        MMC_STATE_READ_ONLY    = 1U << 1,
};

/*
 * Command types
 */
#define OMAP_MMC_CMDTYPE_BC     0
#define OMAP_MMC_CMDTYPE_BCR    1
#define OMAP_MMC_CMDTYPE_AC     2
#define OMAP_MMC_CMDTYPE_ADTC   3


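/*
 * Per-host driver state: the request/command/data currently being
 * processed, the MMIO register mapping and the DMA bounce buffer.
 */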
struct goldfish_mmc_host {
        struct mmc_request      *mrq;
        struct mmc_command      *cmd;
        struct mmc_data         *data;
        struct device           *dev;
        unsigned char           id; /* 16xx chips have 2 MMC blocks */
        void                    *virt_base;
        unsigned int            phys_base;
        int                     irq;
        unsigned char           bus_mode;
        unsigned char           hw_bus_mode;

        unsigned int            sg_len;
        unsigned                dma_done:1;
        unsigned                dma_in_use:1;

        void __iomem            *reg_base;
};

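/*
 * The emulated card has no cover switch, so the cover is always reported
 * as closed; the sysfs attribute below is presumably kept over from the
 * omap driver this code is based on.
 */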
static inline int
goldfish_mmc_cover_is_open(struct goldfish_mmc_host *host)
{
        return 0;
}

static ssize_t
goldfish_mmc_show_cover_switch(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct goldfish_mmc_host *host = dev_get_drvdata(dev);

        return sprintf(buf, "%s\n", goldfish_mmc_cover_is_open(host) ? "open" :
                       "closed");
}

static DEVICE_ATTR(cover_switch, S_IRUGO, goldfish_mmc_show_cover_switch, NULL);

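/*
 * Build the command register value from the opcode, response type,
 * command type and transfer direction, then write the argument and
 * command registers to kick off the command.
 */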
static void
goldfish_mmc_start_command(struct goldfish_mmc_host *host, struct mmc_command *cmd)
{
        u32 cmdreg;
        u32 resptype;
        u32 cmdtype;

        host->cmd = cmd;

        resptype = 0;
        cmdtype = 0;

        /* Our hardware needs to know the exact type */
        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE:
                break;
        case MMC_RSP_R1:
        case MMC_RSP_R1B:
                /* resp 1, 1b, 6, 7 */
                resptype = 1;
                break;
        case MMC_RSP_R2:
                resptype = 2;
                break;
        case MMC_RSP_R3:
                resptype = 3;
                break;
        default:
                dev_err(mmc_dev(mmc_from_priv(host)),
                        "Invalid response type: %04x\n", mmc_resp_type(cmd));
                break;
        }

        if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
                cmdtype = OMAP_MMC_CMDTYPE_ADTC;
        else if (mmc_cmd_type(cmd) == MMC_CMD_BC)
                cmdtype = OMAP_MMC_CMDTYPE_BC;
        else if (mmc_cmd_type(cmd) == MMC_CMD_BCR)
                cmdtype = OMAP_MMC_CMDTYPE_BCR;
        else
                cmdtype = OMAP_MMC_CMDTYPE_AC;

        cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);

        if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
                cmdreg |= 1 << 6;

        if (cmd->flags & MMC_RSP_BUSY)
                cmdreg |= 1 << 11;

        if (host->data && !(host->data->flags & MMC_DATA_WRITE))
                cmdreg |= 1 << 15;

        GOLDFISH_MMC_WRITE(host, MMC_ARG, cmd->arg);
        GOLDFISH_MMC_WRITE(host, MMC_CMD, cmdreg);
}

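/*
 * Complete a data transfer: for reads, copy the data from the bounce
 * buffer back into the scatterlist, then unmap it and either finish the
 * request or issue the stop command.
 */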
static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
                                   struct mmc_data *data)
{
        if (host->dma_in_use) {
                enum dma_data_direction dma_data_dir;

                dma_data_dir = mmc_get_dma_dir(data);

                if (dma_data_dir == DMA_FROM_DEVICE) {
                        /*
                         * We don't really have DMA, so we need
                         * to copy from our platform driver buffer
                         */
                        sg_copy_from_buffer(data->sg, 1, host->virt_base,
                                        data->sg->length);
                }
                host->data->bytes_xfered += data->sg->length;
                dma_unmap_sg(mmc_dev(mmc_from_priv(host)), data->sg,
                             host->sg_len, dma_data_dir);
        }

        host->data = NULL;
        host->sg_len = 0;

        /*
         * NOTE:  MMC layer will sometimes poll-wait CMD13 next, issuing
         * dozens of requests until the card finishes writing data.
         * It'd be cheaper to just wait till an EOFB interrupt arrives...
         */

        if (!data->stop) {
                host->mrq = NULL;
                mmc_request_done(mmc_from_priv(host), data->mrq);
                return;
        }

        goldfish_mmc_start_command(host, data->stop);
}

static void goldfish_mmc_end_of_data(struct goldfish_mmc_host *host,
                                     struct mmc_data *data)
{
        if (!host->dma_in_use) {
                goldfish_mmc_xfer_done(host, data);
                return;
        }
        if (host->dma_done)
                goldfish_mmc_xfer_done(host, data);
}

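/*
 * A command has completed: read the response registers into cmd->resp[]
 * (all four for 136-bit R2 responses) and finish the request unless a
 * data phase is still outstanding.
 */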
static void goldfish_mmc_cmd_done(struct goldfish_mmc_host *host,
                                  struct mmc_command *cmd)
{
        host->cmd = NULL;
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
                        /* response type 2 */
                        cmd->resp[3] =
                                GOLDFISH_MMC_READ(host, MMC_RESP_0);
                        cmd->resp[2] =
                                GOLDFISH_MMC_READ(host, MMC_RESP_1);
                        cmd->resp[1] =
                                GOLDFISH_MMC_READ(host, MMC_RESP_2);
                        cmd->resp[0] =
                                GOLDFISH_MMC_READ(host, MMC_RESP_3);
                } else {
                        /* response types 1, 1b, 3, 4, 5, 6 */
                        cmd->resp[0] =
                                GOLDFISH_MMC_READ(host, MMC_RESP_0);
                }
        }

        if (host->data == NULL || cmd->error) {
                host->mrq = NULL;
                mmc_request_done(mmc_from_priv(host), cmd->mrq);
        }
}

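/*
 * Interrupt handler: acknowledge and collect all pending status bits,
 * then handle command completion, end of data, command timeouts and
 * card-detect state changes.
 */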
static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id)
{
        struct goldfish_mmc_host *host = (struct goldfish_mmc_host *)dev_id;
        u16 status;
        int end_command = 0;
        int end_transfer = 0;
        int state_changed = 0;
        int cmd_timeout = 0;
        while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) {
                GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);

                if (status & MMC_STAT_END_OF_CMD)
                        end_command = 1;

                if (status & MMC_STAT_END_OF_DATA)
                        end_transfer = 1;

                if (status & MMC_STAT_STATE_CHANGE)
                        state_changed = 1;

                if (status & MMC_STAT_CMD_TIMEOUT) {
                        end_command = 0;
                        cmd_timeout = 1;
                }
        }

        if (cmd_timeout) {
                struct mmc_request *mrq = host->mrq;
                mrq->cmd->error = -ETIMEDOUT;
                host->mrq = NULL;
                mmc_request_done(mmc_from_priv(host), mrq);
        }

        if (end_command)
                goldfish_mmc_cmd_done(host, host->cmd);

        if (end_transfer) {
                host->dma_done = 1;
                goldfish_mmc_end_of_data(host, host->data);
        } else if (host->data != NULL) {
                /*
                 * WORKAROUND -- after porting this driver from 2.6 to 3.4,
                 * during device initialization, cases where host->data is
                 * non-null but end_transfer is false would occur. Doing
                 * nothing in such cases results in no further interrupts,
                 * and initialization failure.
                 * TODO -- find the real cause.
                 */
                host->dma_done = 1;
                goldfish_mmc_end_of_data(host, host->data);
        }

        if (state_changed) {
                u32 state = GOLDFISH_MMC_READ(host, MMC_STATE);
                pr_info("%s: Card detect now %d\n", __func__,
                        (state & MMC_STATE_INSERTED));
                mmc_detect_change(mmc_from_priv(host), 0);
        }

        if (!end_command && !end_transfer && !state_changed && !cmd_timeout) {
                status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS);
                dev_info(mmc_dev(mmc_from_priv(host)), "spurious irq 0x%04x\n",
                         status);
                if (status != 0) {
                        GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
                        GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0);
                }
        }

        return IRQ_HANDLED;
}

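/*
 * Program the block length and count for a data phase, map the
 * scatterlist and, for writes, stage the data into the bounce buffer
 * before the command is started.
 */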
static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
                                      struct mmc_request *req)
{
        struct mmc_data *data = req->data;
        int block_size;
        unsigned sg_len;
        enum dma_data_direction dma_data_dir;

        host->data = data;
        if (data == NULL) {
                GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, 0);
                GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, 0);
                host->dma_in_use = 0;
                return;
        }

        block_size = data->blksz;

        GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, data->blocks - 1);
        GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, block_size - 1);

        /*
         * Cope with calling layer confusion; it issues "single
         * block" writes using multi-block scatterlists.
         */
        sg_len = (data->blocks == 1) ? 1 : data->sg_len;

        dma_data_dir = mmc_get_dma_dir(data);

        host->sg_len = dma_map_sg(mmc_dev(mmc_from_priv(host)), data->sg,
                                  sg_len, dma_data_dir);
        host->dma_done = 0;
        host->dma_in_use = 1;

        if (dma_data_dir == DMA_TO_DEVICE) {
                /*
                 * We don't really have DMA, so we need to copy to our
                 * platform driver buffer
                 */
                sg_copy_to_buffer(data->sg, 1, host->virt_base,
                                data->sg->length);
        }
}

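/* Issue a new request: prepare any data phase, then start the command. */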
static void goldfish_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
        struct goldfish_mmc_host *host = mmc_priv(mmc);

        WARN_ON(host->mrq != NULL);

        host->mrq = req;
        goldfish_mmc_prepare_data(host, req);
        goldfish_mmc_start_command(host, req->cmd);

        /*
         * This is to avoid accidentally being detected as an SDIO card
         * in mmc_attach_sdio().
         */
        if (req->cmd->opcode == SD_IO_SEND_OP_COND &&
            req->cmd->flags == (MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR))
                req->cmd->error = -EINVAL;
}

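/*
 * Only the bus mode matters for the emulated controller; the clock and
 * power settings from the MMC core are ignored.
 */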
static void goldfish_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct goldfish_mmc_host *host = mmc_priv(mmc);

        host->bus_mode = ios->bus_mode;
        host->hw_bus_mode = host->bus_mode;
}

static int goldfish_mmc_get_ro(struct mmc_host *mmc)
{
        uint32_t state;
        struct goldfish_mmc_host *host = mmc_priv(mmc);

        state = GOLDFISH_MMC_READ(host, MMC_STATE);
        return ((state & MMC_STATE_READ_ONLY) != 0);
}

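/* Host operations exposed to the MMC core */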
static const struct mmc_host_ops goldfish_mmc_ops = {
        .request        = goldfish_mmc_request,
        .set_ios        = goldfish_mmc_set_ios,
        .get_ro         = goldfish_mmc_get_ro,
};

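/*
 * Probe: map the register window, allocate the DMA-coherent bounce
 * buffer, install the interrupt handler and register the MMC host.
 */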
static int goldfish_mmc_probe(struct platform_device *pdev)
{
        struct mmc_host *mmc;
        struct goldfish_mmc_host *host = NULL;
        struct resource *res;
        int ret = 0;
        int irq;
        dma_addr_t buf_addr;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
        if (res == NULL || irq < 0)
                return -ENXIO;

        mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev);
        if (mmc == NULL) {
                ret = -ENOMEM;
                goto err_alloc_host_failed;
        }

        host = mmc_priv(mmc);

        pr_err("mmc: Mapping %lX to %lX\n", (long)res->start, (long)res->end);
        host->reg_base = ioremap(res->start, resource_size(res));
        if (host->reg_base == NULL) {
                ret = -ENOMEM;
                goto ioremap_failed;
        }
        host->virt_base = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
                                             &buf_addr, GFP_KERNEL);

        if (!host->virt_base) {
                ret = -ENOMEM;
                goto dma_alloc_failed;
        }
        host->phys_base = buf_addr;

        host->id = pdev->id;
        host->irq = irq;

        mmc->ops = &goldfish_mmc_ops;
        mmc->f_min = 400000;
        mmc->f_max = 24000000;
        mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
        mmc->caps = MMC_CAP_4_BIT_DATA;

        /* Use scatterlist DMA to reduce per-transfer costs.
         * NOTE max_seg_size assumption that small blocks aren't
         * normally used (except e.g. for reading SD registers).
         */
        mmc->max_segs = 32;
        mmc->max_blk_size = 2048;       /* MMC_BLOCK_LENGTH is 11 bits (+1) */
        mmc->max_blk_count = 2048;      /* MMC_BLOCK_COUNT is 11 bits (+1) */
        mmc->max_req_size = BUFFER_SIZE;
        mmc->max_seg_size = mmc->max_req_size;

        ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host);
        if (ret) {
                dev_err(&pdev->dev, "Failed to request IRQ for goldfish MMC\n");
                goto err_request_irq_failed;
        }

        host->dev = &pdev->dev;
        platform_set_drvdata(pdev, host);

        ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
        if (ret)
                dev_warn(mmc_dev(mmc), "Unable to create sysfs attributes\n");

        GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base);
        GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE,
                           MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA |
                           MMC_STAT_STATE_CHANGE | MMC_STAT_CMD_TIMEOUT);

        mmc_add_host(mmc);
        return 0;

err_request_irq_failed:
        dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base,
                          host->phys_base);
dma_alloc_failed:
        iounmap(host->reg_base);
ioremap_failed:
        mmc_free_host(mmc);
err_alloc_host_failed:
        return ret;
}

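/* Tear down in reverse order of probe. */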
static int goldfish_mmc_remove(struct platform_device *pdev)
{
        struct goldfish_mmc_host *host = platform_get_drvdata(pdev);
        struct mmc_host *mmc = mmc_from_priv(host);

        BUG_ON(host == NULL);

        mmc_remove_host(mmc);
        free_irq(host->irq, host);
        dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, host->phys_base);
        iounmap(host->reg_base);
        mmc_free_host(mmc);
        return 0;
}

static struct platform_driver goldfish_mmc_driver = {
        .probe          = goldfish_mmc_probe,
        .remove         = goldfish_mmc_remove,
        .driver         = {
                .name   = DRIVER_NAME,
        },
};

module_platform_driver(goldfish_mmc_driver);
MODULE_LICENSE("GPL v2");
