root/drivers/mmc/host/cqhci.c


DEFINITIONS

This source file includes the following definitions.
  1. get_desc
  2. get_link_desc
  3. get_trans_desc_dma
  4. get_trans_desc
  5. setup_trans_desc
  6. cqhci_set_irqs
  7. cqhci_dumpregs
  8. cqhci_host_alloc_tdl
  9. __cqhci_enable
  10. __cqhci_disable
  11. cqhci_suspend
  12. cqhci_resume
  13. cqhci_enable
  14. cqhci_read_ctl
  15. cqhci_off
  16. cqhci_disable
  17. cqhci_prep_task_desc
  18. cqhci_dma_map
  19. cqhci_set_tran_desc
  20. cqhci_prep_tran_desc
  21. cqhci_prep_dcmd_desc
  22. cqhci_post_req
  23. cqhci_tag
  24. cqhci_request
  25. cqhci_recovery_needed
  26. cqhci_error_flags
  27. cqhci_error_irq
  28. cqhci_finish_mrq
  29. cqhci_irq
  30. cqhci_is_idle
  31. cqhci_wait_for_idle
  32. cqhci_timeout
  33. cqhci_tasks_cleared
  34. cqhci_clear_all_tasks
  35. cqhci_halted
  36. cqhci_halt
  37. cqhci_recovery_start
  38. cqhci_error_from_flags
  39. cqhci_recover_mrq
  40. cqhci_recover_mrqs
  41. cqhci_recovery_finish
  42. cqhci_pltfm_init
  43. cqhci_ver_major
  44. cqhci_ver_minor
  45. cqhci_init

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /* Copyright (c) 2015, The Linux Foundation. All rights reserved.
   3  */
   4 
   5 #include <linux/delay.h>
   6 #include <linux/highmem.h>
   7 #include <linux/io.h>
   8 #include <linux/iopoll.h>
   9 #include <linux/module.h>
  10 #include <linux/dma-mapping.h>
  11 #include <linux/slab.h>
  12 #include <linux/scatterlist.h>
  13 #include <linux/platform_device.h>
  14 #include <linux/ktime.h>
  15 
  16 #include <linux/mmc/mmc.h>
  17 #include <linux/mmc/host.h>
  18 #include <linux/mmc/card.h>
  19 
  20 #include "cqhci.h"
  21 
  22 #define DCMD_SLOT 31
  23 #define NUM_SLOTS 32
  24 
  25 struct cqhci_slot {
  26         struct mmc_request *mrq;
  27         unsigned int flags;
  28 #define CQHCI_EXTERNAL_TIMEOUT  BIT(0)
  29 #define CQHCI_COMPLETED         BIT(1)
  30 #define CQHCI_HOST_CRC          BIT(2)
  31 #define CQHCI_HOST_TIMEOUT      BIT(3)
  32 #define CQHCI_HOST_OTHER        BIT(4)
  33 };
  34 
  35 static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
  36 {
  37         return cq_host->desc_base + (tag * cq_host->slot_sz);
  38 }
  39 
  40 static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
  41 {
  42         u8 *desc = get_desc(cq_host, tag);
  43 
  44         return desc + cq_host->task_desc_len;
  45 }
  46 
  47 static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
  48 {
  49         return cq_host->trans_desc_dma_base +
  50                 (cq_host->mmc->max_segs * tag *
  51                  cq_host->trans_desc_len);
  52 }
  53 
  54 static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
  55 {
  56         return cq_host->trans_desc_base +
  57                 (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
  58 }
  59 
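      /*
       * Initialise a slot's link descriptor so that it points at the slot's
       * transfer descriptor list. The DCMD slot carries no data, so its link
       * descriptor is simply marked as the end of the chain.
       */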
  60 static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
  61 {
  62         u8 *link_temp;
  63         dma_addr_t trans_temp;
  64 
  65         link_temp = get_link_desc(cq_host, tag);
  66         trans_temp = get_trans_desc_dma(cq_host, tag);
  67 
  68         memset(link_temp, 0, cq_host->link_desc_len);
  69         if (cq_host->link_desc_len > 8)
  70                 *(link_temp + 8) = 0;
  71 
  72         if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
  73                 *link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
  74                 return;
  75         }
  76 
  77         *link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);
  78 
  79         if (cq_host->dma64) {
  80                 __le64 *data_addr = (__le64 __force *)(link_temp + 4);
  81 
  82                 data_addr[0] = cpu_to_le64(trans_temp);
  83         } else {
  84                 __le32 *data_addr = (__le32 __force *)(link_temp + 4);
  85 
  86                 data_addr[0] = cpu_to_le32(trans_temp);
  87         }
  88 }
  89 
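      /*
       * Program the given mask into both the interrupt status enable (ISTE)
       * and interrupt signal enable (ISGE) registers.
       */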
  90 static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
  91 {
  92         cqhci_writel(cq_host, set, CQHCI_ISTE);
  93         cqhci_writel(cq_host, set, CQHCI_ISGE);
  94 }
  95 
  96 #define DRV_NAME "cqhci"
  97 
  98 #define CQHCI_DUMP(f, x...) \
  99         pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)
 100 
 101 static void cqhci_dumpregs(struct cqhci_host *cq_host)
 102 {
 103         struct mmc_host *mmc = cq_host->mmc;
 104 
 105         CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");
 106 
 107         CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
 108                    cqhci_readl(cq_host, CQHCI_CAP),
 109                    cqhci_readl(cq_host, CQHCI_VER));
 110         CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
 111                    cqhci_readl(cq_host, CQHCI_CFG),
 112                    cqhci_readl(cq_host, CQHCI_CTL));
 113         CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
 114                    cqhci_readl(cq_host, CQHCI_IS),
 115                    cqhci_readl(cq_host, CQHCI_ISTE));
 116         CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
 117                    cqhci_readl(cq_host, CQHCI_ISGE),
 118                    cqhci_readl(cq_host, CQHCI_IC));
 119         CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
 120                    cqhci_readl(cq_host, CQHCI_TDLBA),
 121                    cqhci_readl(cq_host, CQHCI_TDLBAU));
 122         CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
 123                    cqhci_readl(cq_host, CQHCI_TDBR),
 124                    cqhci_readl(cq_host, CQHCI_TCN));
 125         CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
 126                    cqhci_readl(cq_host, CQHCI_DQS),
 127                    cqhci_readl(cq_host, CQHCI_DPT));
 128         CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
 129                    cqhci_readl(cq_host, CQHCI_TCLR),
 130                    cqhci_readl(cq_host, CQHCI_SSC1));
 131         CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
 132                    cqhci_readl(cq_host, CQHCI_SSC2),
 133                    cqhci_readl(cq_host, CQHCI_CRDCT));
 134         CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
 135                    cqhci_readl(cq_host, CQHCI_RMEM),
 136                    cqhci_readl(cq_host, CQHCI_TERRI));
 137         CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
 138                    cqhci_readl(cq_host, CQHCI_CRI),
 139                    cqhci_readl(cq_host, CQHCI_CRA));
 140 
 141         if (cq_host->ops->dumpregs)
 142                 cq_host->ops->dumpregs(mmc);
 143         else
 144                 CQHCI_DUMP(": ===========================================\n");
 145 }
 146 
 147 /**
  148  * The allocated descriptor table for task, link & transfer descriptors
 149  * looks like:
 150  * |----------|
 151  * |task desc |  |->|----------|
 152  * |----------|  |  |trans desc|
 153  * |link desc-|->|  |----------|
 154  * |----------|          .
 155  *      .                .
 156  *  no. of slots      max-segs
 157  *      .           |----------|
 158  * |----------|
 159  * The idea here is to create the [task+trans] table and mark & point the
 160  * link desc to the transfer desc table on a per slot basis.
 161  */
 162 static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
 163 {
 164         int i = 0;
 165 
 166         /* task descriptor can be 64/128 bit irrespective of arch */
 167         if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
 168                 cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
 169                                CQHCI_TASK_DESC_SZ, CQHCI_CFG);
 170                 cq_host->task_desc_len = 16;
 171         } else {
 172                 cq_host->task_desc_len = 8;
 173         }
 174 
 175         /*
  176          * Transfer descriptors may be 96 bits (12 bytes) rather than 128 bits
  177          * (16 bytes) when the SHORT_TXFR_DESC_SZ quirk is set, in which case
  178          * ADMA expects the next valid descriptor at the 96-bit rather than the
  178          * 128-bit boundary.
 179          */
 180         if (cq_host->dma64) {
 181                 if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
 182                         cq_host->trans_desc_len = 12;
 183                 else
 184                         cq_host->trans_desc_len = 16;
 185                 cq_host->link_desc_len = 16;
 186         } else {
 187                 cq_host->trans_desc_len = 8;
 188                 cq_host->link_desc_len = 8;
 189         }
 190 
 191         /* total size of a slot: 1 task & 1 transfer (link) */
 192         cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;
 193 
 194         cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
 195 
 196         cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
 197                 cq_host->mmc->cqe_qdepth;
 198 
 199         pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
 200                  mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
 201                  cq_host->slot_sz);
 202 
 203         /*
 204          * allocate a dma-mapped chunk of memory for the descriptors
 205          * allocate a dma-mapped chunk of memory for link descriptors
 206          * setup each link-desc memory offset per slot-number to
 207          * the descriptor table.
 208          */
 209         cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
 210                                                  cq_host->desc_size,
 211                                                  &cq_host->desc_dma_base,
 212                                                  GFP_KERNEL);
 213         if (!cq_host->desc_base)
 214                 return -ENOMEM;
 215 
 216         cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
 217                                               cq_host->data_size,
 218                                               &cq_host->trans_desc_dma_base,
 219                                               GFP_KERNEL);
 220         if (!cq_host->trans_desc_base) {
 221                 dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
 222                                    cq_host->desc_base,
 223                                    cq_host->desc_dma_base);
 224                 cq_host->desc_base = NULL;
 225                 cq_host->desc_dma_base = 0;
 226                 return -ENOMEM;
 227         }
 228 
 229         pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
 230                  mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
 231                 (unsigned long long)cq_host->desc_dma_base,
 232                 (unsigned long long)cq_host->trans_desc_dma_base);
 233 
  234         for (i = 0; i < cq_host->num_slots; i++)
 235                 setup_trans_desc(cq_host, i);
 236 
 237         return 0;
 238 }
 239 
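      /*
       * Program the configuration register (DCMD support, task descriptor
       * size), the task descriptor list base address and the card's RCA,
       * then enable the CQE and unmask all CQHCI interrupts.
       */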
 240 static void __cqhci_enable(struct cqhci_host *cq_host)
 241 {
 242         struct mmc_host *mmc = cq_host->mmc;
 243         u32 cqcfg;
 244 
 245         cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
 246 
 247         /* Configuration must not be changed while enabled */
 248         if (cqcfg & CQHCI_ENABLE) {
 249                 cqcfg &= ~CQHCI_ENABLE;
 250                 cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
 251         }
 252 
 253         cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);
 254 
 255         if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
 256                 cqcfg |= CQHCI_DCMD;
 257 
 258         if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
 259                 cqcfg |= CQHCI_TASK_DESC_SZ;
 260 
 261         cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
 262 
 263         cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
 264                      CQHCI_TDLBA);
 265         cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
 266                      CQHCI_TDLBAU);
 267 
 268         cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);
 269 
 270         cqhci_set_irqs(cq_host, 0);
 271 
 272         cqcfg |= CQHCI_ENABLE;
 273 
 274         cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
 275 
 276         mmc->cqe_on = true;
 277 
 278         if (cq_host->ops->enable)
 279                 cq_host->ops->enable(mmc);
 280 
 281         /* Ensure all writes are done before interrupts are enabled */
 282         wmb();
 283 
 284         cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
 285 
 286         cq_host->activated = true;
 287 }
 288 
 289 static void __cqhci_disable(struct cqhci_host *cq_host)
 290 {
 291         u32 cqcfg;
 292 
 293         cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
 294         cqcfg &= ~CQHCI_ENABLE;
 295         cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
 296 
 297         cq_host->mmc->cqe_on = false;
 298 
 299         cq_host->activated = false;
 300 }
 301 
 302 int cqhci_suspend(struct mmc_host *mmc)
 303 {
 304         struct cqhci_host *cq_host = mmc->cqe_private;
 305 
 306         if (cq_host->enabled)
 307                 __cqhci_disable(cq_host);
 308 
 309         return 0;
 310 }
 311 EXPORT_SYMBOL(cqhci_suspend);
 312 
 313 int cqhci_resume(struct mmc_host *mmc)
 314 {
 315         /* Re-enable is done upon first request */
 316         return 0;
 317 }
 318 EXPORT_SYMBOL(cqhci_resume);
 319 
 320 static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
 321 {
 322         struct cqhci_host *cq_host = mmc->cqe_private;
 323         int err;
 324 
 325         if (cq_host->enabled)
 326                 return 0;
 327 
 328         cq_host->rca = card->rca;
 329 
 330         err = cqhci_host_alloc_tdl(cq_host);
 331         if (err)
 332                 return err;
 333 
 334         __cqhci_enable(cq_host);
 335 
 336         cq_host->enabled = true;
 337 
 338 #ifdef DEBUG
 339         cqhci_dumpregs(cq_host);
 340 #endif
 341         return 0;
 342 }
 343 
 344 /* CQHCI is idle and should halt immediately, so set a small timeout */
 345 #define CQHCI_OFF_TIMEOUT 100
 346 
 347 static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
 348 {
 349         return cqhci_readl(cq_host, CQHCI_CTL);
 350 }
 351 
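      /*
       * Request a halt and poll CQHCI_CTL until the HALT bit is set or the
       * short timeout expires, so that the host can go back to issuing
       * legacy (non-CQE) commands.
       */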
 352 static void cqhci_off(struct mmc_host *mmc)
 353 {
 354         struct cqhci_host *cq_host = mmc->cqe_private;
 355         u32 reg;
 356         int err;
 357 
 358         if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
 359                 return;
 360 
 361         if (cq_host->ops->disable)
 362                 cq_host->ops->disable(mmc, false);
 363 
 364         cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
 365 
 366         err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
 367                                  reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
 368         if (err < 0)
 369                 pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
 370         else
 371                 pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
 372 
 373         mmc->cqe_on = false;
 374 }
 375 
 376 static void cqhci_disable(struct mmc_host *mmc)
 377 {
 378         struct cqhci_host *cq_host = mmc->cqe_private;
 379 
 380         if (!cq_host->enabled)
 381                 return;
 382 
 383         cqhci_off(mmc);
 384 
 385         __cqhci_disable(cq_host);
 386 
 387         dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
 388                            cq_host->trans_desc_base,
 389                            cq_host->trans_desc_dma_base);
 390 
 391         dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
 392                            cq_host->desc_base,
 393                            cq_host->desc_dma_base);
 394 
 395         cq_host->trans_desc_base = NULL;
 396         cq_host->desc_base = NULL;
 397 
 398         cq_host->enabled = false;
 399 }
 400 
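      /*
       * Build the 64-bit task descriptor attribute word from the request's
       * data flags (direction, priority, reliable write, data tag, QBR,
       * forced programming), block count and block address.
       */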
 401 static void cqhci_prep_task_desc(struct mmc_request *mrq,
 402                                         u64 *data, bool intr)
 403 {
 404         u32 req_flags = mrq->data->flags;
 405 
 406         *data = CQHCI_VALID(1) |
 407                 CQHCI_END(1) |
 408                 CQHCI_INT(intr) |
 409                 CQHCI_ACT(0x5) |
 410                 CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
 411                 CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
 412                 CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
 413                 CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
 414                 CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
 415                 CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
 416                 CQHCI_BLK_COUNT(mrq->data->blocks) |
 417                 CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);
 418 
  419         pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
 420                  mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
 421 }
 422 
 423 static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
 424 {
 425         int sg_count;
 426         struct mmc_data *data = mrq->data;
 427 
 428         if (!data)
 429                 return -EINVAL;
 430 
 431         sg_count = dma_map_sg(mmc_dev(host), data->sg,
 432                               data->sg_len,
 433                               (data->flags & MMC_DATA_WRITE) ?
 434                               DMA_TO_DEVICE : DMA_FROM_DEVICE);
 435         if (!sg_count) {
 436                 pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
 437                 return -ENOMEM;
 438         }
 439 
 440         return sg_count;
 441 }
 442 
 443 static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
 444                                 bool dma64)
 445 {
 446         __le32 *attr = (__le32 __force *)desc;
 447 
 448         *attr = (CQHCI_VALID(1) |
 449                  CQHCI_END(end ? 1 : 0) |
 450                  CQHCI_INT(0) |
 451                  CQHCI_ACT(0x4) |
 452                  CQHCI_DAT_LENGTH(len));
 453 
 454         if (dma64) {
 455                 __le64 *dataddr = (__le64 __force *)(desc + 4);
 456 
 457                 dataddr[0] = cpu_to_le64(addr);
 458         } else {
 459                 __le32 *dataddr = (__le32 __force *)(desc + 4);
 460 
 461                 dataddr[0] = cpu_to_le32(addr);
 462         }
 463 }
 464 
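      /*
       * DMA-map the request's scatterlist and write one transfer descriptor
       * per mapped segment into the slot's transfer descriptor list, marking
       * the last descriptor as the end of the list.
       */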
 465 static int cqhci_prep_tran_desc(struct mmc_request *mrq,
 466                                struct cqhci_host *cq_host, int tag)
 467 {
 468         struct mmc_data *data = mrq->data;
 469         int i, sg_count, len;
 470         bool end = false;
 471         bool dma64 = cq_host->dma64;
 472         dma_addr_t addr;
 473         u8 *desc;
 474         struct scatterlist *sg;
 475 
 476         sg_count = cqhci_dma_map(mrq->host, mrq);
 477         if (sg_count < 0) {
 478                 pr_err("%s: %s: unable to map sg lists, %d\n",
 479                                 mmc_hostname(mrq->host), __func__, sg_count);
 480                 return sg_count;
 481         }
 482 
 483         desc = get_trans_desc(cq_host, tag);
 484 
 485         for_each_sg(data->sg, sg, sg_count, i) {
 486                 addr = sg_dma_address(sg);
 487                 len = sg_dma_len(sg);
 488 
 489                 if ((i+1) == sg_count)
 490                         end = true;
 491                 cqhci_set_tran_desc(desc, addr, len, end, dma64);
 492                 desc += cq_host->trans_desc_len;
 493         }
 494 
 495         return 0;
 496 }
 497 
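      /*
       * Build the task descriptor for a direct command (DCMD): encode the
       * opcode, response type and command timing in the attribute word and
       * store the command argument in the descriptor.
       */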
 498 static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
 499                                    struct mmc_request *mrq)
 500 {
 501         u64 *task_desc = NULL;
 502         u64 data = 0;
 503         u8 resp_type;
 504         u8 *desc;
 505         __le64 *dataddr;
 506         struct cqhci_host *cq_host = mmc->cqe_private;
 507         u8 timing;
 508 
 509         if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
 510                 resp_type = 0x0;
 511                 timing = 0x1;
 512         } else {
 513                 if (mrq->cmd->flags & MMC_RSP_R1B) {
 514                         resp_type = 0x3;
 515                         timing = 0x0;
 516                 } else {
 517                         resp_type = 0x2;
 518                         timing = 0x1;
 519                 }
 520         }
 521 
 522         task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
 523         memset(task_desc, 0, cq_host->task_desc_len);
 524         data |= (CQHCI_VALID(1) |
 525                  CQHCI_END(1) |
 526                  CQHCI_INT(1) |
 527                  CQHCI_QBAR(1) |
 528                  CQHCI_ACT(0x5) |
 529                  CQHCI_CMD_INDEX(mrq->cmd->opcode) |
 530                  CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
 531         if (cq_host->ops->update_dcmd_desc)
 532                 cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
 533         *task_desc |= data;
 534         desc = (u8 *)task_desc;
 535         pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
 536                  mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
 537         dataddr = (__le64 __force *)(desc + 4);
 538         dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
 539 
 540 }
 541 
 542 static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
 543 {
 544         struct mmc_data *data = mrq->data;
 545 
 546         if (data) {
 547                 dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
 548                              (data->flags & MMC_DATA_READ) ?
 549                              DMA_FROM_DEVICE : DMA_TO_DEVICE);
 550         }
 551 }
 552 
 553 static inline int cqhci_tag(struct mmc_request *mrq)
 554 {
 555         return mrq->cmd ? DCMD_SLOT : mrq->tag;
 556 }
 557 
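      /*
       * Queue a request: re-enable the CQE if necessary (e.g. on the first
       * request after resume), prepare the task and transfer descriptors (or
       * the DCMD descriptor), then ring the doorbell for the slot unless
       * error recovery is in progress.
       */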
 558 static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 559 {
 560         int err = 0;
 561         u64 data = 0;
 562         u64 *task_desc = NULL;
 563         int tag = cqhci_tag(mrq);
 564         struct cqhci_host *cq_host = mmc->cqe_private;
 565         unsigned long flags;
 566 
 567         if (!cq_host->enabled) {
 568                 pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
 569                 return -EINVAL;
 570         }
 571 
 572         /* First request after resume has to re-enable */
 573         if (!cq_host->activated)
 574                 __cqhci_enable(cq_host);
 575 
 576         if (!mmc->cqe_on) {
 577                 cqhci_writel(cq_host, 0, CQHCI_CTL);
 578                 mmc->cqe_on = true;
 579                 pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
  580                 if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
 581                         pr_err("%s: cqhci: CQE failed to exit halt state\n",
 582                                mmc_hostname(mmc));
 583                 }
 584                 if (cq_host->ops->enable)
 585                         cq_host->ops->enable(mmc);
 586         }
 587 
 588         if (mrq->data) {
 589                 task_desc = (__le64 __force *)get_desc(cq_host, tag);
 590                 cqhci_prep_task_desc(mrq, &data, 1);
 591                 *task_desc = cpu_to_le64(data);
 592                 err = cqhci_prep_tran_desc(mrq, cq_host, tag);
 593                 if (err) {
 594                         pr_err("%s: cqhci: failed to setup tx desc: %d\n",
 595                                mmc_hostname(mmc), err);
 596                         return err;
 597                 }
 598         } else {
 599                 cqhci_prep_dcmd_desc(mmc, mrq);
 600         }
 601 
 602         spin_lock_irqsave(&cq_host->lock, flags);
 603 
 604         if (cq_host->recovery_halt) {
 605                 err = -EBUSY;
 606                 goto out_unlock;
 607         }
 608 
 609         cq_host->slot[tag].mrq = mrq;
 610         cq_host->slot[tag].flags = 0;
 611 
 612         cq_host->qcnt += 1;
 613         /* Make sure descriptors are ready before ringing the doorbell */
 614         wmb();
 615         cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
 616         if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
 617                 pr_debug("%s: cqhci: doorbell not set for tag %d\n",
 618                          mmc_hostname(mmc), tag);
 619 out_unlock:
 620         spin_unlock_irqrestore(&cq_host->lock, flags);
 621 
 622         if (err)
 623                 cqhci_post_req(mmc, mrq);
 624 
 625         return err;
 626 }
 627 
 628 static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
 629                                   bool notify)
 630 {
 631         struct cqhci_host *cq_host = mmc->cqe_private;
 632 
 633         if (!cq_host->recovery_halt) {
 634                 cq_host->recovery_halt = true;
 635                 pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
 636                 wake_up(&cq_host->wait_queue);
 637                 if (notify && mrq->recovery_notifier)
 638                         mrq->recovery_notifier(mrq);
 639         }
 640 }
 641 
 642 static unsigned int cqhci_error_flags(int error1, int error2)
 643 {
 644         int error = error1 ? error1 : error2;
 645 
 646         switch (error) {
 647         case -EILSEQ:
 648                 return CQHCI_HOST_CRC;
 649         case -ETIMEDOUT:
 650                 return CQHCI_HOST_TIMEOUT;
 651         default:
 652                 return CQHCI_HOST_OTHER;
 653         }
 654 }
 655 
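      /*
       * Handle a response error: use the task error information register
       * (TERRI) to identify the failing task(s), record the error type in
       * the slot flags and trigger recovery. If TERRI does not identify a
       * task, mark one queued task in error so that recovery can make
       * progress.
       */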
 656 static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
 657                             int data_error)
 658 {
 659         struct cqhci_host *cq_host = mmc->cqe_private;
 660         struct cqhci_slot *slot;
 661         u32 terri;
 662         int tag;
 663 
 664         spin_lock(&cq_host->lock);
 665 
 666         terri = cqhci_readl(cq_host, CQHCI_TERRI);
 667 
 668         pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
 669                  mmc_hostname(mmc), status, cmd_error, data_error, terri);
 670 
 671         /* Forget about errors when recovery has already been triggered */
 672         if (cq_host->recovery_halt)
 673                 goto out_unlock;
 674 
 675         if (!cq_host->qcnt) {
 676                 WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
 677                           mmc_hostname(mmc), status, cmd_error, data_error,
 678                           terri);
 679                 goto out_unlock;
 680         }
 681 
 682         if (CQHCI_TERRI_C_VALID(terri)) {
 683                 tag = CQHCI_TERRI_C_TASK(terri);
 684                 slot = &cq_host->slot[tag];
 685                 if (slot->mrq) {
 686                         slot->flags = cqhci_error_flags(cmd_error, data_error);
 687                         cqhci_recovery_needed(mmc, slot->mrq, true);
 688                 }
 689         }
 690 
 691         if (CQHCI_TERRI_D_VALID(terri)) {
 692                 tag = CQHCI_TERRI_D_TASK(terri);
 693                 slot = &cq_host->slot[tag];
 694                 if (slot->mrq) {
 695                         slot->flags = cqhci_error_flags(data_error, cmd_error);
 696                         cqhci_recovery_needed(mmc, slot->mrq, true);
 697                 }
 698         }
 699 
 700         if (!cq_host->recovery_halt) {
 701                 /*
 702                  * The only way to guarantee forward progress is to mark at
 703                  * least one task in error, so if none is indicated, pick one.
 704                  */
 705                 for (tag = 0; tag < NUM_SLOTS; tag++) {
 706                         slot = &cq_host->slot[tag];
 707                         if (!slot->mrq)
 708                                 continue;
 709                         slot->flags = cqhci_error_flags(data_error, cmd_error);
 710                         cqhci_recovery_needed(mmc, slot->mrq, true);
 711                         break;
 712                 }
 713         }
 714 
 715 out_unlock:
 716         spin_unlock(&cq_host->lock);
 717 }
 718 
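      /*
       * Complete the request occupying the given tag. During recovery the
       * slot is only marked completed; the request is reported later by the
       * recovery code.
       */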
 719 static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
 720 {
 721         struct cqhci_host *cq_host = mmc->cqe_private;
 722         struct cqhci_slot *slot = &cq_host->slot[tag];
 723         struct mmc_request *mrq = slot->mrq;
 724         struct mmc_data *data;
 725 
 726         if (!mrq) {
 727                 WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
 728                           mmc_hostname(mmc), tag);
 729                 return;
 730         }
 731 
 732         /* No completions allowed during recovery */
 733         if (cq_host->recovery_halt) {
 734                 slot->flags |= CQHCI_COMPLETED;
 735                 return;
 736         }
 737 
 738         slot->mrq = NULL;
 739 
 740         cq_host->qcnt -= 1;
 741 
 742         data = mrq->data;
 743         if (data) {
 744                 if (data->error)
 745                         data->bytes_xfered = 0;
 746                 else
 747                         data->bytes_xfered = data->blksz * data->blocks;
 748         }
 749 
 750         mmc_cqe_request_done(mmc, mrq);
 751 }
 752 
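      /*
       * Main interrupt handler, called by the host controller driver with
       * any command/data errors it has already decoded. Handles response
       * errors, task completion notifications, and halt/clear-tasks
       * wake-ups.
       */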
 753 irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
 754                       int data_error)
 755 {
 756         u32 status;
 757         unsigned long tag = 0, comp_status;
 758         struct cqhci_host *cq_host = mmc->cqe_private;
 759 
 760         status = cqhci_readl(cq_host, CQHCI_IS);
 761         cqhci_writel(cq_host, status, CQHCI_IS);
 762 
 763         pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);
 764 
 765         if ((status & CQHCI_IS_RED) || cmd_error || data_error)
 766                 cqhci_error_irq(mmc, status, cmd_error, data_error);
 767 
 768         if (status & CQHCI_IS_TCC) {
 769                 /* read TCN and complete the request */
 770                 comp_status = cqhci_readl(cq_host, CQHCI_TCN);
 771                 cqhci_writel(cq_host, comp_status, CQHCI_TCN);
 772                 pr_debug("%s: cqhci: TCN: 0x%08lx\n",
 773                          mmc_hostname(mmc), comp_status);
 774 
 775                 spin_lock(&cq_host->lock);
 776 
 777                 for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
 778                         /* complete the corresponding mrq */
 779                         pr_debug("%s: cqhci: completing tag %lu\n",
 780                                  mmc_hostname(mmc), tag);
 781                         cqhci_finish_mrq(mmc, tag);
 782                 }
 783 
 784                 if (cq_host->waiting_for_idle && !cq_host->qcnt) {
 785                         cq_host->waiting_for_idle = false;
 786                         wake_up(&cq_host->wait_queue);
 787                 }
 788 
 789                 spin_unlock(&cq_host->lock);
 790         }
 791 
 792         if (status & CQHCI_IS_TCL)
 793                 wake_up(&cq_host->wait_queue);
 794 
 795         if (status & CQHCI_IS_HAC)
 796                 wake_up(&cq_host->wait_queue);
 797 
 798         return IRQ_HANDLED;
 799 }
 800 EXPORT_SYMBOL(cqhci_irq);
 801 
 802 static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
 803 {
 804         unsigned long flags;
 805         bool is_idle;
 806 
 807         spin_lock_irqsave(&cq_host->lock, flags);
 808         is_idle = !cq_host->qcnt || cq_host->recovery_halt;
 809         *ret = cq_host->recovery_halt ? -EBUSY : 0;
 810         cq_host->waiting_for_idle = !is_idle;
 811         spin_unlock_irqrestore(&cq_host->lock, flags);
 812 
 813         return is_idle;
 814 }
 815 
 816 static int cqhci_wait_for_idle(struct mmc_host *mmc)
 817 {
 818         struct cqhci_host *cq_host = mmc->cqe_private;
 819         int ret;
 820 
 821         wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));
 822 
 823         return ret;
 824 }
 825 
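      /*
       * Handle an externally detected timeout for a queued request: if the
       * request still belongs to CQHCI, flag it as externally timed out and
       * trigger recovery.
       */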
 826 static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
 827                           bool *recovery_needed)
 828 {
 829         struct cqhci_host *cq_host = mmc->cqe_private;
 830         int tag = cqhci_tag(mrq);
 831         struct cqhci_slot *slot = &cq_host->slot[tag];
 832         unsigned long flags;
 833         bool timed_out;
 834 
 835         spin_lock_irqsave(&cq_host->lock, flags);
 836         timed_out = slot->mrq == mrq;
 837         if (timed_out) {
 838                 slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
 839                 cqhci_recovery_needed(mmc, mrq, false);
 840                 *recovery_needed = cq_host->recovery_halt;
 841         }
 842         spin_unlock_irqrestore(&cq_host->lock, flags);
 843 
 844         if (timed_out) {
 845                 pr_err("%s: cqhci: timeout for tag %d\n",
 846                        mmc_hostname(mmc), tag);
 847                 cqhci_dumpregs(cq_host);
 848         }
 849 
 850         return timed_out;
 851 }
 852 
 853 static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
 854 {
 855         return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
 856 }
 857 
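      /*
       * Set the CLEAR_ALL_TASKS bit and wait up to the given number of
       * milliseconds for the controller to clear it, using the TCL interrupt
       * to wake the waiter.
       */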
 858 static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
 859 {
 860         struct cqhci_host *cq_host = mmc->cqe_private;
 861         bool ret;
 862         u32 ctl;
 863 
 864         cqhci_set_irqs(cq_host, CQHCI_IS_TCL);
 865 
 866         ctl = cqhci_readl(cq_host, CQHCI_CTL);
 867         ctl |= CQHCI_CLEAR_ALL_TASKS;
 868         cqhci_writel(cq_host, ctl, CQHCI_CTL);
 869 
 870         wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
 871                            msecs_to_jiffies(timeout) + 1);
 872 
 873         cqhci_set_irqs(cq_host, 0);
 874 
 875         ret = cqhci_tasks_cleared(cq_host);
 876 
 877         if (!ret)
 878                 pr_debug("%s: cqhci: Failed to clear tasks\n",
 879                          mmc_hostname(mmc));
 880 
 881         return ret;
 882 }
 883 
 884 static bool cqhci_halted(struct cqhci_host *cq_host)
 885 {
 886         return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
 887 }
 888 
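      /*
       * Set the HALT bit and wait up to the given number of milliseconds for
       * the controller to halt, using the HAC interrupt to wake the waiter.
       */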
 889 static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
 890 {
 891         struct cqhci_host *cq_host = mmc->cqe_private;
 892         bool ret;
 893         u32 ctl;
 894 
 895         if (cqhci_halted(cq_host))
 896                 return true;
 897 
 898         cqhci_set_irqs(cq_host, CQHCI_IS_HAC);
 899 
 900         ctl = cqhci_readl(cq_host, CQHCI_CTL);
 901         ctl |= CQHCI_HALT;
 902         cqhci_writel(cq_host, ctl, CQHCI_CTL);
 903 
 904         wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
 905                            msecs_to_jiffies(timeout) + 1);
 906 
 907         cqhci_set_irqs(cq_host, 0);
 908 
 909         ret = cqhci_halted(cq_host);
 910 
 911         if (!ret)
 912                 pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
 913 
 914         return ret;
 915 }
 916 
 917 /*
 918  * After halting we expect to be able to use the command line. We interpret the
 919  * failure to halt to mean the data lines might still be in use (and the upper
 920  * layers will need to send a STOP command), so we set the timeout based on a
 921  * generous command timeout.
 922  */
 923 #define CQHCI_START_HALT_TIMEOUT        5
 924 
 925 static void cqhci_recovery_start(struct mmc_host *mmc)
 926 {
 927         struct cqhci_host *cq_host = mmc->cqe_private;
 928 
 929         pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
 930 
 931         WARN_ON(!cq_host->recovery_halt);
 932 
 933         cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);
 934 
 935         if (cq_host->ops->disable)
 936                 cq_host->ops->disable(mmc, true);
 937 
 938         mmc->cqe_on = false;
 939 }
 940 
 941 static int cqhci_error_from_flags(unsigned int flags)
 942 {
 943         if (!flags)
 944                 return 0;
 945 
 946         /* CRC errors might indicate re-tuning so prefer to report that */
 947         if (flags & CQHCI_HOST_CRC)
 948                 return -EILSEQ;
 949 
 950         if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
 951                 return -ETIMEDOUT;
 952 
 953         return -EIO;
 954 }
 955 
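      /*
       * Complete a request that was still queued when recovery started,
       * reporting an error derived from the error flags recorded in its
       * slot.
       */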
 956 static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
 957 {
 958         struct cqhci_slot *slot = &cq_host->slot[tag];
 959         struct mmc_request *mrq = slot->mrq;
 960         struct mmc_data *data;
 961 
 962         if (!mrq)
 963                 return;
 964 
 965         slot->mrq = NULL;
 966 
 967         cq_host->qcnt -= 1;
 968 
 969         data = mrq->data;
 970         if (data) {
 971                 data->bytes_xfered = 0;
 972                 data->error = cqhci_error_from_flags(slot->flags);
 973         } else {
 974                 mrq->cmd->error = cqhci_error_from_flags(slot->flags);
 975         }
 976 
 977         mmc_cqe_request_done(cq_host->mmc, mrq);
 978 }
 979 
 980 static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
 981 {
 982         int i;
 983 
 984         for (i = 0; i < cq_host->num_slots; i++)
 985                 cqhci_recover_mrq(cq_host, i);
 986 }
 987 
 988 /*
 989  * By now the command and data lines should be unused so there is no reason for
 990  * CQHCI to take a long time to halt, but if it doesn't halt there could be
 991  * problems clearing tasks, so be generous.
 992  */
 993 #define CQHCI_FINISH_HALT_TIMEOUT       20
 994 
  995 /* CQHCI could be expected to clear its internal state pretty quickly */
 996 #define CQHCI_CLEAR_TIMEOUT             20
 997 
 998 static void cqhci_recovery_finish(struct mmc_host *mmc)
 999 {
1000         struct cqhci_host *cq_host = mmc->cqe_private;
1001         unsigned long flags;
1002         u32 cqcfg;
1003         bool ok;
1004 
1005         pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
1006 
1007         WARN_ON(!cq_host->recovery_halt);
1008 
1009         ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
1010 
1011         if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
1012                 ok = false;
1013 
1014         /*
 1015          * The specification contradicts itself: it says tasks cannot be
 1016          * cleared unless CQHCI halts, that CQHCI should be disabled and
 1017          * re-enabled if it fails to halt, yet also that it must not be
 1018          * disabled before the tasks are cleared. Have a go anyway.
1019          */
1020         if (!ok) {
1021                 pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
1022                 cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
1023                 cqcfg &= ~CQHCI_ENABLE;
1024                 cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
1025                 cqcfg |= CQHCI_ENABLE;
1026                 cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
1027                 /* Be sure that there are no tasks */
1028                 ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
1029                 if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
1030                         ok = false;
1031                 WARN_ON(!ok);
1032         }
1033 
1034         cqhci_recover_mrqs(cq_host);
1035 
1036         WARN_ON(cq_host->qcnt);
1037 
1038         spin_lock_irqsave(&cq_host->lock, flags);
1039         cq_host->qcnt = 0;
1040         cq_host->recovery_halt = false;
1041         mmc->cqe_on = false;
1042         spin_unlock_irqrestore(&cq_host->lock, flags);
1043 
1044         /* Ensure all writes are done before interrupts are re-enabled */
1045         wmb();
1046 
1047         cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);
1048 
1049         cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
1050 
1051         pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
1052 }
1053 
1054 static const struct mmc_cqe_ops cqhci_cqe_ops = {
1055         .cqe_enable = cqhci_enable,
1056         .cqe_disable = cqhci_disable,
1057         .cqe_request = cqhci_request,
1058         .cqe_post_req = cqhci_post_req,
1059         .cqe_off = cqhci_off,
1060         .cqe_wait_for_idle = cqhci_wait_for_idle,
1061         .cqe_timeout = cqhci_timeout,
1062         .cqe_recovery_start = cqhci_recovery_start,
1063         .cqe_recovery_finish = cqhci_recovery_finish,
1064 };
1065 
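      /*
       * For platform devices that describe the CQE registers as a separate
       * "cqhci_mem" resource: allocate a cqhci_host and ioremap the register
       * block.
       */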
1066 struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
1067 {
1068         struct cqhci_host *cq_host;
1069         struct resource *cqhci_memres = NULL;
1070 
1071         /* check and setup CMDQ interface */
1072         cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1073                                                    "cqhci_mem");
1074         if (!cqhci_memres) {
1075                 dev_dbg(&pdev->dev, "CMDQ not supported\n");
1076                 return ERR_PTR(-EINVAL);
1077         }
1078 
1079         cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
1080         if (!cq_host)
1081                 return ERR_PTR(-ENOMEM);
1082         cq_host->mmio = devm_ioremap(&pdev->dev,
1083                                      cqhci_memres->start,
1084                                      resource_size(cqhci_memres));
1085         if (!cq_host->mmio) {
1086                 dev_err(&pdev->dev, "failed to remap cqhci regs\n");
1087                 return ERR_PTR(-EBUSY);
1088         }
1089         dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");
1090 
1091         return cq_host;
1092 }
1093 EXPORT_SYMBOL(cqhci_pltfm_init);
1094 
1095 static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
1096 {
1097         return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
1098 }
1099 
1100 static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
1101 {
1102         u32 ver = cqhci_readl(cq_host, CQHCI_VER);
1103 
1104         return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
1105 }
1106 
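      /*
       * Attach the CQHCI host to the mmc host: install the cqe_ops, allocate
       * the slot array and set the queue depth (one slot is reserved for
       * DCMD when supported).
       */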
1107 int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
1108               bool dma64)
1109 {
1110         int err;
1111 
1112         cq_host->dma64 = dma64;
1113         cq_host->mmc = mmc;
1114         cq_host->mmc->cqe_private = cq_host;
1115 
1116         cq_host->num_slots = NUM_SLOTS;
1117         cq_host->dcmd_slot = DCMD_SLOT;
1118 
1119         mmc->cqe_ops = &cqhci_cqe_ops;
1120 
1121         mmc->cqe_qdepth = NUM_SLOTS;
1122         if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
1123                 mmc->cqe_qdepth -= 1;
1124 
1125         cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
1126                                      sizeof(*cq_host->slot), GFP_KERNEL);
1127         if (!cq_host->slot) {
1128                 err = -ENOMEM;
1129                 goto out_err;
1130         }
1131 
1132         spin_lock_init(&cq_host->lock);
1133 
1134         init_completion(&cq_host->halt_comp);
1135         init_waitqueue_head(&cq_host->wait_queue);
1136 
1137         pr_info("%s: CQHCI version %u.%02u\n",
1138                 mmc_hostname(mmc), cqhci_ver_major(cq_host),
1139                 cqhci_ver_minor(cq_host));
1140 
1141         return 0;
1142 
1143 out_err:
1144         pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
1145                mmc_hostname(mmc), cqhci_ver_major(cq_host),
1146                cqhci_ver_minor(cq_host), err);
1147         return err;
1148 }
1149 EXPORT_SYMBOL(cqhci_init);
1150 
1151 MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
1152 MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
1153 MODULE_LICENSE("GPL v2");
