/*
 * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
 * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/workqueue.h>

#define USDHI6_SD_CMD		0x0000
#define USDHI6_SD_PORT_SEL	0x0004
#define USDHI6_SD_ARG		0x0008
#define USDHI6_SD_STOP		0x0010
#define USDHI6_SD_SECCNT	0x0014
#define USDHI6_SD_RSP10		0x0018
#define USDHI6_SD_RSP32		0x0020
#define USDHI6_SD_RSP54		0x0028
#define USDHI6_SD_RSP76		0x0030
#define USDHI6_SD_INFO1		0x0038
#define USDHI6_SD_INFO2		0x003c
#define USDHI6_SD_INFO1_MASK	0x0040
#define USDHI6_SD_INFO2_MASK	0x0044
#define USDHI6_SD_CLK_CTRL	0x0048
#define USDHI6_SD_SIZE		0x004c
#define USDHI6_SD_OPTION	0x0050
#define USDHI6_SD_ERR_STS1	0x0058
#define USDHI6_SD_ERR_STS2	0x005c
#define USDHI6_SD_BUF0		0x0060
#define USDHI6_SDIO_MODE	0x0068
#define USDHI6_SDIO_INFO1	0x006c
#define USDHI6_SDIO_INFO1_MASK	0x0070
#define USDHI6_CC_EXT_MODE	0x01b0
#define USDHI6_SOFT_RST		0x01c0
#define USDHI6_VERSION		0x01c4
#define USDHI6_HOST_MODE	0x01c8
#define USDHI6_SDIF_MODE	0x01cc

#define USDHI6_SD_CMD_APP		0x0040
#define USDHI6_SD_CMD_MODE_RSP_AUTO	0x0000
#define USDHI6_SD_CMD_MODE_RSP_NONE	0x0300
#define USDHI6_SD_CMD_MODE_RSP_R1	0x0400	/* Also R5, R6, R7 */
#define USDHI6_SD_CMD_MODE_RSP_R1B	0x0500	/* R1b */
#define USDHI6_SD_CMD_MODE_RSP_R2	0x0600
#define USDHI6_SD_CMD_MODE_RSP_R3	0x0700	/* Also R4 */
#define USDHI6_SD_CMD_DATA		0x0800
#define USDHI6_SD_CMD_READ		0x1000
#define USDHI6_SD_CMD_MULTI		0x2000
#define USDHI6_SD_CMD_CMD12_AUTO_OFF	0x4000

#define USDHI6_CC_EXT_MODE_SDRW		BIT(1)

#define USDHI6_SD_INFO1_RSP_END		BIT(0)
#define USDHI6_SD_INFO1_ACCESS_END	BIT(2)
#define USDHI6_SD_INFO1_CARD_OUT	BIT(3)
#define USDHI6_SD_INFO1_CARD_IN		BIT(4)
#define USDHI6_SD_INFO1_CD		BIT(5)
#define USDHI6_SD_INFO1_WP		BIT(7)
#define USDHI6_SD_INFO1_D3_CARD_OUT	BIT(8)
#define USDHI6_SD_INFO1_D3_CARD_IN	BIT(9)

#define USDHI6_SD_INFO2_CMD_ERR		BIT(0)
#define USDHI6_SD_INFO2_CRC_ERR		BIT(1)
#define USDHI6_SD_INFO2_END_ERR		BIT(2)
#define USDHI6_SD_INFO2_TOUT		BIT(3)
#define USDHI6_SD_INFO2_IWA_ERR		BIT(4)
#define USDHI6_SD_INFO2_IRA_ERR		BIT(5)
#define USDHI6_SD_INFO2_RSP_TOUT	BIT(6)
#define USDHI6_SD_INFO2_SDDAT0		BIT(7)
#define USDHI6_SD_INFO2_BRE		BIT(8)
#define USDHI6_SD_INFO2_BWE		BIT(9)
#define USDHI6_SD_INFO2_SCLKDIVEN	BIT(13)
#define USDHI6_SD_INFO2_CBSY		BIT(14)
#define USDHI6_SD_INFO2_ILA		BIT(15)

#define USDHI6_SD_INFO1_CARD_INSERT (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_D3_CARD_IN)
#define USDHI6_SD_INFO1_CARD_EJECT (USDHI6_SD_INFO1_CARD_OUT | USDHI6_SD_INFO1_D3_CARD_OUT)
#define USDHI6_SD_INFO1_CARD (USDHI6_SD_INFO1_CARD_INSERT | USDHI6_SD_INFO1_CARD_EJECT)
#define USDHI6_SD_INFO1_CARD_CD (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_CARD_OUT)

#define USDHI6_SD_INFO2_ERR	(USDHI6_SD_INFO2_CMD_ERR |	\
	USDHI6_SD_INFO2_CRC_ERR | USDHI6_SD_INFO2_END_ERR |	\
	USDHI6_SD_INFO2_TOUT | USDHI6_SD_INFO2_IWA_ERR |	\
	USDHI6_SD_INFO2_IRA_ERR | USDHI6_SD_INFO2_RSP_TOUT |	\
	USDHI6_SD_INFO2_ILA)

#define USDHI6_SD_INFO1_IRQ	(USDHI6_SD_INFO1_RSP_END | USDHI6_SD_INFO1_ACCESS_END | \
				 USDHI6_SD_INFO1_CARD)

#define USDHI6_SD_INFO2_IRQ	(USDHI6_SD_INFO2_ERR | USDHI6_SD_INFO2_BRE | \
				 USDHI6_SD_INFO2_BWE | 0x0800 | USDHI6_SD_INFO2_ILA)

#define USDHI6_SD_CLK_CTRL_SCLKEN	BIT(8)

#define USDHI6_SD_STOP_STP		BIT(0)
#define USDHI6_SD_STOP_SEC		BIT(8)

#define USDHI6_SDIO_INFO1_IOIRQ		BIT(0)
#define USDHI6_SDIO_INFO1_EXPUB52	BIT(14)
#define USDHI6_SDIO_INFO1_EXWT		BIT(15)

#define USDHI6_SD_ERR_STS1_CRC_NO_ERROR	BIT(13)

#define USDHI6_SOFT_RST_RESERVED	(BIT(1) | BIT(2))
#define USDHI6_SOFT_RST_RESET		BIT(0)

#define USDHI6_SD_OPTION_TIMEOUT_SHIFT	4
#define USDHI6_SD_OPTION_TIMEOUT_MASK	(0xf << USDHI6_SD_OPTION_TIMEOUT_SHIFT)
#define USDHI6_SD_OPTION_WIDTH_1	BIT(15)

#define USDHI6_SD_PORT_SEL_PORTS_SHIFT	8

#define USDHI6_SD_CLK_CTRL_DIV_MASK	0xff

#define USDHI6_SDIO_INFO1_IRQ	(USDHI6_SDIO_INFO1_IOIRQ | 3 | \
				 USDHI6_SDIO_INFO1_EXPUB52 | USDHI6_SDIO_INFO1_EXWT)

#define USDHI6_MIN_DMA 64
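/*
 * A sketch of the request life cycle, as implemented by the state machine
 * below: a request starts in USDHI6_WAIT_FOR_REQUEST and moves to
 * USDHI6_WAIT_FOR_CMD when the command is written to SD_CMD. If data is
 * involved, it then enters one of the (M)READ / (M)WRITE PIO states or
 * USDHI6_WAIT_FOR_DMA, proceeds to USDHI6_WAIT_FOR_DATA_END and, for
 * multi-block transfers with a stop command, to USDHI6_WAIT_FOR_STOP,
 * before returning to USDHI6_WAIT_FOR_REQUEST.
 */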
enum usdhi6_wait_for {
	USDHI6_WAIT_FOR_REQUEST,
	USDHI6_WAIT_FOR_CMD,
	USDHI6_WAIT_FOR_MREAD,
	USDHI6_WAIT_FOR_MWRITE,
	USDHI6_WAIT_FOR_READ,
	USDHI6_WAIT_FOR_WRITE,
	USDHI6_WAIT_FOR_DATA_END,
	USDHI6_WAIT_FOR_STOP,
	USDHI6_WAIT_FOR_DMA,
};

struct usdhi6_page {
	struct page *page;
	void *mapped;		/* mapped page */
};

struct usdhi6_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	void __iomem *base;
	struct clk *clk;

	/* SG memory handling */

	/* Common for multiple and single block requests */
	struct usdhi6_page pg;	/* current page from an SG */
	void *blk_page;		/* either a mapped page, or the bounce buffer */
	size_t offset;		/* offset within a page, including sg->offset */

	/* Blocks crossing a page boundary */
	size_t head_len;
	struct usdhi6_page head_pg;

	/* A bounce buffer for unaligned blocks or blocks crossing a page boundary */
	struct scatterlist bounce_sg;
	u8 bounce_buf[512];

	/* Multiple block requests only */
	struct scatterlist *sg;	/* current SG segment */
	int page_idx;		/* page index within an SG segment */

	enum usdhi6_wait_for wait;
	u32 status_mask;
	u32 status2_mask;
	u32 sdio_mask;
	u32 io_error;
	u32 irq_status;
	unsigned long imclk;
	unsigned long rate;
	bool app_cmd;

	/* Timeout handling */
	struct delayed_work timeout_work;
	unsigned long timeout;

	/* DMA support */
	struct dma_chan *chan_rx;
	struct dma_chan *chan_tx;
	bool dma_active;
};

/* I/O primitives */

static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data)
{
	iowrite32(data, host->base + reg);
	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
		host->base, reg, data);
}

static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data)
{
	iowrite16(data, host->base + reg);
	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
		host->base, reg, data);
}

static u32 usdhi6_read(struct usdhi6_host *host, u32 reg)
{
	u32 data = ioread32(host->base + reg);
	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
		host->base, reg, data);
	return data;
}

static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg)
{
	u16 data = ioread16(host->base + reg);
	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
		host->base, reg, data);
	return data;
}
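/*
 * Judging by the mask calculation in usdhi6_irq_enable() below, a bit set in
 * SD_INFO1_MASK / SD_INFO2_MASK disables the corresponding event source, so
 * enabling events means writing the complement of the wanted bits within the
 * USDHI6_SD_INFO*_IRQ sets.
 */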
static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2)
{
	host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1;
	host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2;
	usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask);
	usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask);
}

static void usdhi6_wait_for_resp(struct usdhi6_host *host)
{
	usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END |
			  USDHI6_SD_INFO1_ACCESS_END | USDHI6_SD_INFO1_CARD_CD,
			  USDHI6_SD_INFO2_ERR);
}

static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read)
{
	usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END |
			  USDHI6_SD_INFO1_CARD_CD, USDHI6_SD_INFO2_ERR |
			  (read ? USDHI6_SD_INFO2_BRE : USDHI6_SD_INFO2_BWE));
}

static void usdhi6_only_cd(struct usdhi6_host *host)
{
	/* Mask all except card hotplug */
	usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0);
}

static void usdhi6_mask_all(struct usdhi6_host *host)
{
	usdhi6_irq_enable(host, 0, 0);
}

static int usdhi6_error_code(struct usdhi6_host *host)
{
	u32 err;

	usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP);

	if (host->io_error &
	    (USDHI6_SD_INFO2_RSP_TOUT | USDHI6_SD_INFO2_TOUT)) {
		u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54);
		int opc = host->mrq ? host->mrq->cmd->opcode : -1;

		err = usdhi6_read(host, USDHI6_SD_ERR_STS2);
		/* Response timeout is often normal, don't spam the log */
		if (host->wait == USDHI6_WAIT_FOR_CMD)
			dev_dbg(mmc_dev(host->mmc),
				"T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
				err, rsp54, host->wait, opc);
		else
			dev_warn(mmc_dev(host->mmc),
				 "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
				 err, rsp54, host->wait, opc);
		return -ETIMEDOUT;
	}

	err = usdhi6_read(host, USDHI6_SD_ERR_STS1);
	if (err != USDHI6_SD_ERR_STS1_CRC_NO_ERROR)
		dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n",
			 err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1);
	if (host->io_error & USDHI6_SD_INFO2_ILA)
		return -EILSEQ;

	return -EIO;
}

/* Scatter-Gather management */

/*
 * In PIO mode we have to map each page separately, using kmap(). That way
 * adjacent pages are mapped to non-adjacent virtual addresses. That's why we
 * have to use a bounce buffer for blocks crossing page boundaries. Such blocks
 * have been observed with an SDIO WiFi card (b43 driver).
 */
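/*
 * A worked example of the bounce case, with illustrative numbers: given
 * PAGE_SIZE = 4096, blksz = 512 and sg->offset = 256, the first page holds
 * head = 3840 bytes, i.e. 7 complete blocks plus blk_head = 3840 % 512 = 256
 * bytes of block 7. Block 7 therefore straddles the page boundary: its first
 * 256 bytes lie at the end of the current page and the remaining 256 bytes at
 * the start of the next one, so it is assembled in, or written out via,
 * host->bounce_buf by usdhi6_blk_bounce() below.
 */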
static void usdhi6_blk_bounce(struct usdhi6_host *host,
			      struct scatterlist *sg)
{
	struct mmc_data *data = host->mrq->data;
	size_t blk_head = host->head_len;

	dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n",
		__func__, host->mrq->cmd->opcode, data->sg_len,
		data->blksz, data->blocks, sg->offset);

	host->head_pg.page = host->pg.page;
	host->head_pg.mapped = host->pg.mapped;
	host->pg.page = nth_page(host->pg.page, 1);
	host->pg.mapped = kmap(host->pg.page);

	host->blk_page = host->bounce_buf;
	host->offset = 0;

	if (data->flags & MMC_DATA_READ)
		return;

	memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head,
	       blk_head);
	memcpy(host->bounce_buf + blk_head, host->pg.mapped,
	       data->blksz - blk_head);
}

/* Only called for multiple block IO */
static void usdhi6_sg_prep(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data = mrq->data;

	usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks);

	host->sg = data->sg;
	/* TODO: if we always map, this is redundant */
	host->offset = host->sg->offset;
}

/* Map the first page in an SG segment: common for multiple and single block IO */
static void *usdhi6_sg_map(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg;
	size_t head = PAGE_SIZE - sg->offset;
	size_t blk_head = head % data->blksz;

	WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page);
	if (WARN(sg_dma_len(sg) % data->blksz,
		 "SG size %u isn't a multiple of block size %u\n",
		 sg_dma_len(sg), data->blksz))
		return NULL;

	host->pg.page = sg_page(sg);
	host->pg.mapped = kmap(host->pg.page);
	host->offset = sg->offset;

	/*
	 * Block size must be a power of 2 for multi-block transfers,
	 * therefore blk_head is equal for all pages in this SG
	 */
	host->head_len = blk_head;

	if (head < data->blksz)
		/*
		 * The first block in the SG crosses a page boundary.
		 * Max blksz = 512, so blocks can only span 2 pages
		 */
		usdhi6_blk_bounce(host, sg);
	else
		host->blk_page = host->pg.mapped;

	dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n",
		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
		sg->offset, host->mrq->cmd->opcode, host->mrq);

	return host->blk_page + host->offset;
}
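/*
 * The "force" argument below: when true, both the head page and the current
 * page are unmapped unconditionally, which is needed on errors and for
 * single-block transfers, where host->page_idx stays 0; when false, the
 * current page is kept mapped if further blocks of the same SG segment still
 * need it.
 */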
/* Unmap the current page: common for multiple and single block IO */
static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force)
{
	struct mmc_data *data = host->mrq->data;
	struct page *page = host->head_pg.page;

	if (page) {
		/* Previous block was cross-page boundary */
		struct scatterlist *sg = data->sg_len > 1 ?
			host->sg : data->sg;
		size_t blk_head = host->head_len;

		if (!data->error && data->flags & MMC_DATA_READ) {
			memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head,
			       host->bounce_buf, blk_head);
			memcpy(host->pg.mapped, host->bounce_buf + blk_head,
			       data->blksz - blk_head);
		}

		flush_dcache_page(page);
		kunmap(page);

		host->head_pg.page = NULL;

		if (!force && sg_dma_len(sg) + sg->offset >
		    (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head)
			/* More blocks in this SG, don't unmap the next page */
			return;
	}

	page = host->pg.page;
	if (!page)
		return;

	flush_dcache_page(page);
	kunmap(page);

	host->pg.page = NULL;
}

/* Called from MMC_WRITE_MULTIPLE_BLOCK or MMC_READ_MULTIPLE_BLOCK */
static void usdhi6_sg_advance(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;
	size_t done, total;

	/* New offset: set at the end of the previous block */
	if (host->head_pg.page) {
		/* Finished a cross-page block, jump to the new page */
		host->page_idx++;
		host->offset = data->blksz - host->head_len;
		host->blk_page = host->pg.mapped;
		usdhi6_sg_unmap(host, false);
	} else {
		host->offset += data->blksz;
		/* The completed block didn't cross a page boundary */
		if (host->offset == PAGE_SIZE) {
			/* If required, we'll map the page below */
			host->offset = 0;
			host->page_idx++;
		}
	}

	/*
	 * Now host->blk_page + host->offset points at the end of our last
	 * block and host->page_idx is the index of the page, in which our new
	 * block is located, if any
	 */

	done = (host->page_idx << PAGE_SHIFT) + host->offset;
	total = host->sg->offset + sg_dma_len(host->sg);

	dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__,
		done, total, host->offset);

	if (done < total && host->offset) {
		/* More blocks in this page */
		if (host->offset + data->blksz > PAGE_SIZE)
			/* We have reached a block that spans 2 pages */
			usdhi6_blk_bounce(host, host->sg);

		return;
	}

	/* Finished current page or an SG segment */
	usdhi6_sg_unmap(host, false);

	if (done == total) {
		/*
		 * End of an SG segment or the complete SG: jump to the next
		 * segment, we'll map it later in usdhi6_blk_read() or
		 * usdhi6_blk_write()
		 */
		struct scatterlist *next = sg_next(host->sg);

		host->page_idx = 0;

		if (!next)
			host->wait = USDHI6_WAIT_FOR_DATA_END;
		host->sg = next;

		if (WARN(next && sg_dma_len(next) % data->blksz,
			 "SG size %u isn't a multiple of block size %u\n",
			 sg_dma_len(next), data->blksz))
			data->error = -EINVAL;

		return;
	}

	/* We cannot get here after crossing a page border */

	/* Next page in the same SG */
	host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
	host->pg.mapped = kmap(host->pg.page);
	host->blk_page = host->pg.mapped;

	dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n",
		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
		host->mrq->cmd->opcode, host->mrq);
}

/* DMA handling */

static void usdhi6_dma_release(struct usdhi6_host *host)
{
	host->dma_active = false;
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
}
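/*
 * DMA is strictly optional: the channels are requested once in
 * usdhi6_probe(), and if descriptor setup or a transfer ever fails,
 * usdhi6_dma_release() drops them for good, so all subsequent requests run in
 * PIO mode.
 */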
static void usdhi6_dma_stop_unmap(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;

	if (!host->dma_active)
		return;

	usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
	host->dma_active = false;

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev, data->sg,
			     data->sg_len, DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev, data->sg,
			     data->sg_len, DMA_TO_DEVICE);
}

static void usdhi6_dma_complete(void *arg)
{
	struct usdhi6_host *host = arg;
	struct mmc_request *mrq = host->mrq;

	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n",
		 dev_name(mmc_dev(host->mmc)), mrq))
		return;

	dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__,
		mrq->cmd->opcode);

	usdhi6_dma_stop_unmap(host);
	usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
}

static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan,
			    enum dma_transfer_direction dir)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	dma_cookie_t cookie = -EINVAL;
	enum dma_data_direction data_dir;
	int ret;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		data_dir = DMA_TO_DEVICE;
		break;
	case DMA_DEV_TO_MEM:
		data_dir = DMA_FROM_DEVICE;
		break;
	default:
		return -EINVAL;
	}

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret, dir,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = usdhi6_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
	}

	dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n",
		__func__, data->sg_len, ret, cookie, desc);

	if (cookie < 0) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = cookie;
		usdhi6_dma_release(host);
		dev_warn(mmc_dev(host->mmc),
			 "DMA failed: %d, falling back to PIO\n", ret);
	}

	return cookie;
}

static int usdhi6_dma_start(struct usdhi6_host *host)
{
	if (!host->chan_rx || !host->chan_tx)
		return -ENODEV;

	if (host->mrq->data->flags & MMC_DATA_READ)
		return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM);

	return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV);
}
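/*
 * usdhi6_dma_setup() returns the dmaengine cookie, so the caller in
 * usdhi6_rq_start() can simply compare the result against DMA_MIN_COOKIE:
 * any valid cookie from dmaengine_submit() means the descriptor is queued
 * and the SD_BUF DMA mode can be turned on via USDHI6_CC_EXT_MODE_SDRW.
 */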
static void usdhi6_dma_kill(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;

	dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n",
		__func__, data->sg_len, data->blocks, data->blksz);
	/* Abort DMA */
	if (data->flags & MMC_DATA_READ)
		dmaengine_terminate_all(host->chan_rx);
	else
		dmaengine_terminate_all(host->chan_tx);
}

static void usdhi6_dma_check_error(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;

	dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n",
		__func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1));

	if (host->io_error) {
		data->error = usdhi6_error_code(host);
		data->bytes_xfered = 0;
		usdhi6_dma_kill(host);
		usdhi6_dma_release(host);
		dev_warn(mmc_dev(host->mmc),
			 "DMA failed: %d, falling back to PIO\n", data->error);
		return;
	}

	/*
	 * The datasheet tells us to check a response from the card, whereas
	 * responses only come after the command phase, not after the data
	 * phase. Let's check anyway.
	 */
	if (host->irq_status & USDHI6_SD_INFO1_RSP_END)
		dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n");
}

static void usdhi6_dma_kick(struct usdhi6_host *host)
{
	if (host->mrq->data->flags & MMC_DATA_READ)
		dma_async_issue_pending(host->chan_rx);
	else
		dma_async_issue_pending(host->chan_tx);
}

static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
{
	struct dma_slave_config cfg = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	int ret;

	host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
	dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
		host->chan_tx);

	if (!host->chan_tx)
		return;

	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = start + USDHI6_SD_BUF0;
	cfg.dst_maxburst = 128;	/* 128 words * 4 bytes = 512 bytes */
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(host->chan_tx, &cfg);
	if (ret < 0)
		goto e_release_tx;

	host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
	dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
		host->chan_rx);

	if (!host->chan_rx)
		goto e_release_tx;

	cfg.direction = DMA_DEV_TO_MEM;
	cfg.src_addr = cfg.dst_addr;
	cfg.src_maxburst = 128;	/* 128 words * 4 bytes = 512 bytes */
	cfg.dst_addr = 0;
	ret = dmaengine_slave_config(host->chan_rx, &cfg);
	if (ret < 0)
		goto e_release_rx;

	return;

e_release_rx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
e_release_tx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}

/* API helpers */

static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios)
{
	unsigned long rate = ios->clock;
	u32 val;
	unsigned int i;

	for (i = 1000; i; i--) {
		if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN)
			break;
		usleep_range(10, 100);
	}

	if (!i) {
		dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n");
		return;
	}

	val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK;

	if (rate) {
		unsigned long new_rate;

		if (host->imclk <= rate) {
			if (ios->timing != MMC_TIMING_UHS_DDR50) {
				/* Cannot have 1-to-1 clock in DDR mode */
				new_rate = host->imclk;
				val |= 0xff;
			} else {
				new_rate = host->imclk / 2;
			}
		} else {
			unsigned long div =
				roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate));
			val |= div >> 2;
			new_rate = host->imclk / div;
		}

		if (host->rate == new_rate)
			return;

		host->rate = new_rate;

		dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n",
			rate, (val & 0xff) << 2, new_rate);
	}

	/*
	 * If the old or the new rate is equal to the input rate, we have to
	 * switch the clock off before changing the divider and back on after
	 */
	if (host->imclk == rate || host->imclk == host->rate || !rate)
		usdhi6_write(host, USDHI6_SD_CLK_CTRL,
			     val & ~USDHI6_SD_CLK_CTRL_SCLKEN);

	if (!rate) {
		host->rate = 0;
		return;
	}

	usdhi6_write(host, USDHI6_SD_CLK_CTRL, val);

	if (host->imclk == rate || host->imclk == host->rate ||
	    !(val & USDHI6_SD_CLK_CTRL_SCLKEN))
		usdhi6_write(host, USDHI6_SD_CLK_CTRL,
			     val | USDHI6_SD_CLK_CTRL_SCLKEN);
}
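/*
 * A divider example for usdhi6_clk_set() above (arithmetic only, the imclk
 * value is made up): with imclk = 100 MHz and a requested 25 MHz,
 * DIV_ROUND_UP() yields 4, roundup_pow_of_two() keeps it at 4, the DIV field
 * becomes 4 >> 2 = 0x01 and the card clock 100 / 4 = 25 MHz. The field thus
 * encodes /4 as 0x01, /8 as 0x02, ... /512 as 0x80, with 0x00 meaning /2 and
 * 0xff selecting the undivided input clock.
 */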
static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vmmc))
		/* Errors ignored... */
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
				      ios->power_mode ? ios->vdd : 0);
}

static int usdhi6_reset(struct usdhi6_host *host)
{
	int i;

	usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED);
	cpu_relax();
	usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET);
	for (i = 1000; i; i--)
		if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET)
			break;

	return i ? 0 : -ETIMEDOUT;
}

static void usdhi6_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct usdhi6_host *host = mmc_priv(mmc);
	u32 option, mode;
	int ret;

	dev_dbg(mmc_dev(mmc), "%uHz, OCR: %u, power %u, bus-width %u, timing %u\n",
		ios->clock, ios->vdd, ios->power_mode, ios->bus_width, ios->timing);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		usdhi6_set_power(host, ios);
		usdhi6_only_cd(host);
		break;
	case MMC_POWER_UP:
		/*
		 * Apart from here, we only touch USDHI6_SD_OPTION from
		 * .request(), which cannot race with MMC_POWER_UP
		 */
		ret = usdhi6_reset(host);
		if (ret < 0) {
			dev_err(mmc_dev(mmc), "Cannot reset the interface!\n");
		} else {
			usdhi6_set_power(host, ios);
			usdhi6_only_cd(host);
		}
		break;
	case MMC_POWER_ON:
		option = usdhi6_read(host, USDHI6_SD_OPTION);
		/*
		 * The eMMC standard only allows 4 or 8 bits in DDR mode, and
		 * the same probably holds for SD cards. We check here anyway,
		 * since the datasheet explicitly requires 4 bits for DDR.
		 */
		if (ios->bus_width == MMC_BUS_WIDTH_1) {
			if (ios->timing == MMC_TIMING_UHS_DDR50)
				dev_err(mmc_dev(mmc),
					"4 bits are required for DDR\n");
			option |= USDHI6_SD_OPTION_WIDTH_1;
			mode = 0;
		} else {
			option &= ~USDHI6_SD_OPTION_WIDTH_1;
			mode = ios->timing == MMC_TIMING_UHS_DDR50;
		}
		usdhi6_write(host, USDHI6_SD_OPTION, option);
		usdhi6_write(host, USDHI6_SDIF_MODE, mode);
		break;
	}

	if (host->rate != ios->clock)
		usdhi6_clk_set(host, ios);
}

/* This sets the data timeout; the response timeout is fixed at 640 clock cycles */
static void usdhi6_timeout_set(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;
	u32 val;
	unsigned long ticks;

	if (!mrq->data)
		ticks = host->rate / 1000 * mrq->cmd->busy_timeout;
	else
		ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) +
			mrq->data->timeout_clks;

	if (!ticks || ticks > 1 << 27)
		/* Max timeout */
		val = 14;
	else if (ticks < 1 << 13)
		/* Min timeout */
		val = 0;
	else
		val = order_base_2(ticks) - 13;

	dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n",
		mrq->data ? "data" : "cmd", ticks, host->rate);

	/* Timeout Counter mask: 0xf0 */
	usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) |
		     (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK));
}
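/*
 * An example of the encoding above, with illustrative numbers: the hardware
 * timeout is apparently 2^(13 + val) SD clock cycles with val = 0..14. For
 * ticks = 100000, order_base_2(100000) = 17, so val = 4 and the counter
 * expires after 2^17 = 131072 cycles, the smallest power of two not below
 * the requested number of ticks.
 */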
"data" : "cmd", ticks, host->rate); 887 888 /* Timeout Counter mask: 0xf0 */ 889 usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) | 890 (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK)); 891} 892 893static void usdhi6_request_done(struct usdhi6_host *host) 894{ 895 struct mmc_request *mrq = host->mrq; 896 struct mmc_data *data = mrq->data; 897 898 if (WARN(host->pg.page || host->head_pg.page, 899 "Page %p or %p not unmapped: wait %u, CMD%d(%c) @ +0x%zx %ux%u in SG%u!\n", 900 host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode, 901 data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-', 902 data ? host->offset : 0, data ? data->blocks : 0, 903 data ? data->blksz : 0, data ? data->sg_len : 0)) 904 usdhi6_sg_unmap(host, true); 905 906 if (mrq->cmd->error || 907 (data && data->error) || 908 (mrq->stop && mrq->stop->error)) 909 dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n", 910 __func__, mrq->cmd->opcode, data ? data->blocks : 0, 911 data ? data->blksz : 0, 912 mrq->cmd->error, 913 data ? data->error : 1, 914 mrq->stop ? mrq->stop->error : 1); 915 916 /* Disable DMA */ 917 usdhi6_write(host, USDHI6_CC_EXT_MODE, 0); 918 host->wait = USDHI6_WAIT_FOR_REQUEST; 919 host->mrq = NULL; 920 921 mmc_request_done(host->mmc, mrq); 922} 923 924static int usdhi6_cmd_flags(struct usdhi6_host *host) 925{ 926 struct mmc_request *mrq = host->mrq; 927 struct mmc_command *cmd = mrq->cmd; 928 u16 opc = cmd->opcode; 929 930 if (host->app_cmd) { 931 host->app_cmd = false; 932 opc |= USDHI6_SD_CMD_APP; 933 } 934 935 if (mrq->data) { 936 opc |= USDHI6_SD_CMD_DATA; 937 938 if (mrq->data->flags & MMC_DATA_READ) 939 opc |= USDHI6_SD_CMD_READ; 940 941 if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || 942 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || 943 (cmd->opcode == SD_IO_RW_EXTENDED && 944 mrq->data->blocks > 1)) { 945 opc |= USDHI6_SD_CMD_MULTI; 946 if (!mrq->stop) 947 opc |= USDHI6_SD_CMD_CMD12_AUTO_OFF; 948 } 949 950 switch (mmc_resp_type(cmd)) { 951 case MMC_RSP_NONE: 952 opc |= USDHI6_SD_CMD_MODE_RSP_NONE; 953 break; 954 case MMC_RSP_R1: 955 opc |= USDHI6_SD_CMD_MODE_RSP_R1; 956 break; 957 case MMC_RSP_R1B: 958 opc |= USDHI6_SD_CMD_MODE_RSP_R1B; 959 break; 960 case MMC_RSP_R2: 961 opc |= USDHI6_SD_CMD_MODE_RSP_R2; 962 break; 963 case MMC_RSP_R3: 964 opc |= USDHI6_SD_CMD_MODE_RSP_R3; 965 break; 966 default: 967 dev_warn(mmc_dev(host->mmc), 968 "Unknown response type %d\n", 969 mmc_resp_type(cmd)); 970 return -EINVAL; 971 } 972 } 973 974 return opc; 975} 976 977static int usdhi6_rq_start(struct usdhi6_host *host) 978{ 979 struct mmc_request *mrq = host->mrq; 980 struct mmc_command *cmd = mrq->cmd; 981 struct mmc_data *data = mrq->data; 982 int opc = usdhi6_cmd_flags(host); 983 int i; 984 985 if (opc < 0) 986 return opc; 987 988 for (i = 1000; i; i--) { 989 if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY)) 990 break; 991 usleep_range(10, 100); 992 } 993 994 if (!i) { 995 dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n"); 996 return -EAGAIN; 997 } 998 999 if (data) { 1000 bool use_dma; 1001 int ret = 0; 1002 1003 host->page_idx = 0; 1004 1005 if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) { 1006 switch (data->blksz) { 1007 case 512: 1008 break; 1009 case 32: 1010 case 64: 1011 case 128: 1012 case 256: 1013 if (mrq->stop) 1014 ret = -EINVAL; 1015 break; 1016 default: 1017 ret = -EINVAL; 1018 } 1019 } else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK || 1020 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) && 1021 
static int usdhi6_rq_start(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	int opc = usdhi6_cmd_flags(host);
	int i;

	if (opc < 0)
		return opc;

	for (i = 1000; i; i--) {
		if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY))
			break;
		usleep_range(10, 100);
	}

	if (!i) {
		dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n");
		return -EAGAIN;
	}

	if (data) {
		bool use_dma;
		int ret = 0;

		host->page_idx = 0;

		if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) {
			switch (data->blksz) {
			case 512:
				break;
			case 32:
			case 64:
			case 128:
			case 256:
				if (mrq->stop)
					ret = -EINVAL;
				break;
			default:
				ret = -EINVAL;
			}
		} else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
			    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) &&
			   data->blksz != 512) {
			ret = -EINVAL;
		}

		if (ret < 0) {
			dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n",
				 __func__, data->blocks, data->blksz);
			return -EINVAL;
		}

		if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
		    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		    (cmd->opcode == SD_IO_RW_EXTENDED &&
		     data->blocks > 1))
			usdhi6_sg_prep(host);

		usdhi6_write(host, USDHI6_SD_SIZE, data->blksz);

		if ((data->blksz >= USDHI6_MIN_DMA ||
		     data->blocks > 1) &&
		    (data->blksz % 4 ||
		     data->sg->offset % 4))
			dev_dbg(mmc_dev(host->mmc),
				"Bad SG of %u: %ux%u @ %u\n", data->sg_len,
				data->blksz, data->blocks, data->sg->offset);

		/* Enable DMA for USDHI6_MIN_DMA bytes or more */
		use_dma = data->blksz >= USDHI6_MIN_DMA &&
			!(data->blksz % 4) &&
			usdhi6_dma_start(host) >= DMA_MIN_COOKIE;

		if (use_dma)
			usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW);

		dev_dbg(mmc_dev(host->mmc),
			"%s(): request opcode %u, %u blocks of %u bytes in %u segments, %s %s @+0x%x%s\n",
			__func__, cmd->opcode, data->blocks, data->blksz,
			data->sg_len, use_dma ? "DMA" : "PIO",
			data->flags & MMC_DATA_READ ? "read" : "write",
			data->sg->offset, mrq->stop ? " + stop" : "");
	} else {
		dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n",
			__func__, cmd->opcode);
	}

	/* We have to get a command completion interrupt with DMA too */
	usdhi6_wait_for_resp(host);

	host->wait = USDHI6_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);

	/* SEC bit is required to enable block counting by the core */
	usdhi6_write(host, USDHI6_SD_STOP,
		     data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0);
	usdhi6_write(host, USDHI6_SD_ARG, cmd->arg);

	/* Kick command execution */
	usdhi6_write(host, USDHI6_SD_CMD, opc);

	return 0;
}

static void usdhi6_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct usdhi6_host *host = mmc_priv(mmc);
	int ret;

	cancel_delayed_work_sync(&host->timeout_work);

	host->mrq = mrq;
	host->sg = NULL;

	usdhi6_timeout_set(host);
	ret = usdhi6_rq_start(host);
	if (ret < 0) {
		mrq->cmd->error = ret;
		usdhi6_request_done(host);
	}
}

static int usdhi6_get_cd(struct mmc_host *mmc)
{
	struct usdhi6_host *host = mmc_priv(mmc);
	/* Read is atomic, no need to lock */
	u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD;

/*
 *	level	status.CD	CD_ACTIVE_HIGH	card present
 *	1	0		0		0
 *	1	0		1		1
 *	0	1		0		1
 *	0	1		1		0
 */
	return !status ^ !(mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
}

static int usdhi6_get_ro(struct mmc_host *mmc)
{
	struct usdhi6_host *host = mmc_priv(mmc);
	/* No locking as above */
	u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP;

/*
 *	level	status.WP	RO_ACTIVE_HIGH	card read-only
 *	1	0		0		0
 *	1	0		1		1
 *	0	1		0		1
 *	0	1		1		0
 */
	return !status ^ !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
}
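/*
 * Presumably, writing 1 to USDHI6_SDIO_MODE below enables SDIO interrupt
 * detection on the DAT1 line (the standard SDIO interrupt mechanism);
 * delivery to the host is additionally gated by USDHI6_SDIO_INFO1_MASK.
 */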
"en" : "dis"); 1138 1139 if (enable) { 1140 host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ; 1141 usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask); 1142 usdhi6_write(host, USDHI6_SDIO_MODE, 1); 1143 } else { 1144 usdhi6_write(host, USDHI6_SDIO_MODE, 0); 1145 usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ); 1146 host->sdio_mask = USDHI6_SDIO_INFO1_IRQ; 1147 } 1148} 1149 1150static struct mmc_host_ops usdhi6_ops = { 1151 .request = usdhi6_request, 1152 .set_ios = usdhi6_set_ios, 1153 .get_cd = usdhi6_get_cd, 1154 .get_ro = usdhi6_get_ro, 1155 .enable_sdio_irq = usdhi6_enable_sdio_irq, 1156}; 1157 1158/* State machine handlers */ 1159 1160static void usdhi6_resp_cmd12(struct usdhi6_host *host) 1161{ 1162 struct mmc_command *cmd = host->mrq->stop; 1163 cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10); 1164} 1165 1166static void usdhi6_resp_read(struct usdhi6_host *host) 1167{ 1168 struct mmc_command *cmd = host->mrq->cmd; 1169 u32 *rsp = cmd->resp, tmp = 0; 1170 int i; 1171 1172/* 1173 * RSP10 39-8 1174 * RSP32 71-40 1175 * RSP54 103-72 1176 * RSP76 127-104 1177 * R2-type response: 1178 * resp[0] = r[127..96] 1179 * resp[1] = r[95..64] 1180 * resp[2] = r[63..32] 1181 * resp[3] = r[31..0] 1182 * Other responses: 1183 * resp[0] = r[39..8] 1184 */ 1185 1186 if (mmc_resp_type(cmd) == MMC_RSP_NONE) 1187 return; 1188 1189 if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) { 1190 dev_err(mmc_dev(host->mmc), 1191 "CMD%d: response expected but is missing!\n", cmd->opcode); 1192 return; 1193 } 1194 1195 if (mmc_resp_type(cmd) & MMC_RSP_136) 1196 for (i = 0; i < 4; i++) { 1197 if (i) 1198 rsp[3 - i] = tmp >> 24; 1199 tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8); 1200 rsp[3 - i] |= tmp << 8; 1201 } 1202 else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || 1203 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) 1204 /* Read RSP54 to avoid conflict with auto CMD12 */ 1205 rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54); 1206 else 1207 rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10); 1208 1209 dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]); 1210} 1211 1212static int usdhi6_blk_read(struct usdhi6_host *host) 1213{ 1214 struct mmc_data *data = host->mrq->data; 1215 u32 *p; 1216 int i, rest; 1217 1218 if (host->io_error) { 1219 data->error = usdhi6_error_code(host); 1220 goto error; 1221 } 1222 1223 if (host->pg.page) { 1224 p = host->blk_page + host->offset; 1225 } else { 1226 p = usdhi6_sg_map(host); 1227 if (!p) { 1228 data->error = -ENOMEM; 1229 goto error; 1230 } 1231 } 1232 1233 for (i = 0; i < data->blksz / 4; i++, p++) 1234 *p = usdhi6_read(host, USDHI6_SD_BUF0); 1235 1236 rest = data->blksz % 4; 1237 for (i = 0; i < (rest + 1) / 2; i++) { 1238 u16 d = usdhi6_read16(host, USDHI6_SD_BUF0); 1239 ((u8 *)p)[2 * i] = ((u8 *)&d)[0]; 1240 if (rest > 1 && !i) 1241 ((u8 *)p)[2 * i + 1] = ((u8 *)&d)[1]; 1242 } 1243 1244 return 0; 1245 1246error: 1247 dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error); 1248 host->wait = USDHI6_WAIT_FOR_REQUEST; 1249 return data->error; 1250} 1251 1252static int usdhi6_blk_write(struct usdhi6_host *host) 1253{ 1254 struct mmc_data *data = host->mrq->data; 1255 u32 *p; 1256 int i, rest; 1257 1258 if (host->io_error) { 1259 data->error = usdhi6_error_code(host); 1260 goto error; 1261 } 1262 1263 if (host->pg.page) { 1264 p = host->blk_page + host->offset; 1265 } else { 1266 p = usdhi6_sg_map(host); 1267 if (!p) { 1268 data->error = -ENOMEM; 1269 goto error; 1270 } 1271 } 1272 1273 for (i = 0; i < data->blksz / 4; i++, p++) 1274 
static void usdhi6_resp_read(struct usdhi6_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	u32 *rsp = cmd->resp, tmp = 0;
	int i;

/*
 *	RSP10	39-8
 *	RSP32	71-40
 *	RSP54	103-72
 *	RSP76	127-104
 * R2-type response:
 *	resp[0] = r[127..96]
 *	resp[1] = r[95..64]
 *	resp[2] = r[63..32]
 *	resp[3] = r[31..0]
 * Other responses:
 *	resp[0] = r[39..8]
 */

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		return;

	if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) {
		dev_err(mmc_dev(host->mmc),
			"CMD%d: response expected but is missing!\n", cmd->opcode);
		return;
	}

	if (mmc_resp_type(cmd) & MMC_RSP_136)
		for (i = 0; i < 4; i++) {
			if (i)
				rsp[3 - i] = tmp >> 24;
			tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8);
			rsp[3 - i] |= tmp << 8;
		}
	else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
		 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
		/* Read RSP54 to avoid conflict with auto CMD12 */
		rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54);
	else
		rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10);

	dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]);
}

static int usdhi6_blk_read(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p;
	int i, rest;

	if (host->io_error) {
		data->error = usdhi6_error_code(host);
		goto error;
	}

	if (host->pg.page) {
		p = host->blk_page + host->offset;
	} else {
		p = usdhi6_sg_map(host);
		if (!p) {
			data->error = -ENOMEM;
			goto error;
		}
	}

	for (i = 0; i < data->blksz / 4; i++, p++)
		*p = usdhi6_read(host, USDHI6_SD_BUF0);

	rest = data->blksz % 4;
	for (i = 0; i < (rest + 1) / 2; i++) {
		u16 d = usdhi6_read16(host, USDHI6_SD_BUF0);
		((u8 *)p)[2 * i] = ((u8 *)&d)[0];
		if (rest > 1 && !i)
			((u8 *)p)[2 * i + 1] = ((u8 *)&d)[1];
	}

	return 0;

error:
	dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
	host->wait = USDHI6_WAIT_FOR_REQUEST;
	return data->error;
}

static int usdhi6_blk_write(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p;
	int i, rest;

	if (host->io_error) {
		data->error = usdhi6_error_code(host);
		goto error;
	}

	if (host->pg.page) {
		p = host->blk_page + host->offset;
	} else {
		p = usdhi6_sg_map(host);
		if (!p) {
			data->error = -ENOMEM;
			goto error;
		}
	}

	for (i = 0; i < data->blksz / 4; i++, p++)
		usdhi6_write(host, USDHI6_SD_BUF0, *p);

	rest = data->blksz % 4;
	for (i = 0; i < (rest + 1) / 2; i++) {
		u16 d;
		((u8 *)&d)[0] = ((u8 *)p)[2 * i];
		if (rest > 1 && !i)
			((u8 *)&d)[1] = ((u8 *)p)[2 * i + 1];
		else
			((u8 *)&d)[1] = 0;
		usdhi6_write16(host, USDHI6_SD_BUF0, d);
	}

	return 0;

error:
	dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
	host->wait = USDHI6_WAIT_FOR_REQUEST;
	return data->error;
}
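/*
 * In both PIO paths above the bulk of a block moves through 32-bit FIFO
 * accesses; a block size that isn't a multiple of 4 leaves rest = 1..3 tail
 * bytes, transferred with at most two 16-bit accesses, and the odd byte of
 * an odd-sized tail is zero-padded on writes.
 */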
static int usdhi6_stop_cmd(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;

	switch (mrq->cmd->opcode) {
	case MMC_READ_MULTIPLE_BLOCK:
	case MMC_WRITE_MULTIPLE_BLOCK:
		if (mrq->stop->opcode == MMC_STOP_TRANSMISSION) {
			host->wait = USDHI6_WAIT_FOR_STOP;
			return 0;
		}
		/* Unsupported STOP command */
	default:
		dev_err(mmc_dev(host->mmc),
			"unsupported stop CMD%d for CMD%d\n",
			mrq->stop->opcode, mrq->cmd->opcode);
		mrq->stop->error = -EOPNOTSUPP;
	}

	return -EOPNOTSUPP;
}

static bool usdhi6_end_cmd(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd = mrq->cmd;

	if (host->io_error) {
		cmd->error = usdhi6_error_code(host);
		return false;
	}

	usdhi6_resp_read(host);

	if (!mrq->data)
		return false;

	if (host->dma_active) {
		usdhi6_dma_kick(host);
		if (!mrq->stop)
			host->wait = USDHI6_WAIT_FOR_DMA;
		else if (usdhi6_stop_cmd(host) < 0)
			return false;
	} else if (mrq->data->flags & MMC_DATA_READ) {
		if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
		    (cmd->opcode == SD_IO_RW_EXTENDED &&
		     mrq->data->blocks > 1))
			host->wait = USDHI6_WAIT_FOR_MREAD;
		else
			host->wait = USDHI6_WAIT_FOR_READ;
	} else {
		if (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		    (cmd->opcode == SD_IO_RW_EXTENDED &&
		     mrq->data->blocks > 1))
			host->wait = USDHI6_WAIT_FOR_MWRITE;
		else
			host->wait = USDHI6_WAIT_FOR_WRITE;
	}

	return true;
}

static bool usdhi6_read_block(struct usdhi6_host *host)
{
	/* ACCESS_END IRQ is already unmasked */
	int ret = usdhi6_blk_read(host);

	/*
	 * Have to force unmapping both pages: the single block could have been
	 * cross-page, in which case for single-block IO host->page_idx == 0.
	 * So, if we don't force, the second page won't be unmapped.
	 */
	usdhi6_sg_unmap(host, true);

	if (ret < 0)
		return false;

	host->wait = USDHI6_WAIT_FOR_DATA_END;
	return true;
}

static bool usdhi6_mread_block(struct usdhi6_host *host)
{
	int ret = usdhi6_blk_read(host);

	if (ret < 0)
		return false;

	usdhi6_sg_advance(host);

	return !host->mrq->data->error &&
		(host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
}

static bool usdhi6_write_block(struct usdhi6_host *host)
{
	int ret = usdhi6_blk_write(host);

	/* See comment in usdhi6_read_block() */
	usdhi6_sg_unmap(host, true);

	if (ret < 0)
		return false;

	host->wait = USDHI6_WAIT_FOR_DATA_END;
	return true;
}

static bool usdhi6_mwrite_block(struct usdhi6_host *host)
{
	int ret = usdhi6_blk_write(host);

	if (ret < 0)
		return false;

	usdhi6_sg_advance(host);

	return !host->mrq->data->error &&
		(host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
}
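/*
 * The block handlers above return true while the transfer still expects
 * another buffer-ready or ACCESS_END interrupt, and false once the request
 * can be completed: usdhi6_sd_bh() below uses that as its io_wait flag to
 * decide whether to re-arm the BRE / BWE interrupts and the software timeout.
 */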
/* Interrupt & timeout handlers */

static irqreturn_t usdhi6_sd_bh(int irq, void *dev_id)
{
	struct usdhi6_host *host = dev_id;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_data *data;
	bool io_wait = false;

	cancel_delayed_work_sync(&host->timeout_work);

	mrq = host->mrq;
	if (!mrq)
		return IRQ_HANDLED;

	cmd = mrq->cmd;
	data = mrq->data;

	switch (host->wait) {
	case USDHI6_WAIT_FOR_REQUEST:
		/* We're too late, the timeout has already kicked in */
		return IRQ_HANDLED;
	case USDHI6_WAIT_FOR_CMD:
		/* Wait for data? */
		io_wait = usdhi6_end_cmd(host);
		break;
	case USDHI6_WAIT_FOR_MREAD:
		/* Wait for more data? */
		io_wait = usdhi6_mread_block(host);
		break;
	case USDHI6_WAIT_FOR_READ:
		/* Wait for data end? */
		io_wait = usdhi6_read_block(host);
		break;
	case USDHI6_WAIT_FOR_MWRITE:
		/* Wait for more data to write? */
		io_wait = usdhi6_mwrite_block(host);
		break;
	case USDHI6_WAIT_FOR_WRITE:
		/* Wait for data end? */
		io_wait = usdhi6_write_block(host);
		break;
	case USDHI6_WAIT_FOR_DMA:
		usdhi6_dma_check_error(host);
		break;
	case USDHI6_WAIT_FOR_STOP:
		usdhi6_write(host, USDHI6_SD_STOP, 0);
		if (host->io_error) {
			int ret = usdhi6_error_code(host);
			if (mrq->stop)
				mrq->stop->error = ret;
			else
				mrq->data->error = ret;
			dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret);
			break;
		}
		usdhi6_resp_cmd12(host);
		mrq->stop->error = 0;
		break;
	case USDHI6_WAIT_FOR_DATA_END:
		if (host->io_error) {
			mrq->data->error = usdhi6_error_code(host);
			dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__,
				 mrq->data->error);
		}
		break;
	default:
		cmd->error = -EFAULT;
		dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
		usdhi6_request_done(host);
		return IRQ_HANDLED;
	}

	if (io_wait) {
		schedule_delayed_work(&host->timeout_work, host->timeout);
		/* Wait for more data or ACCESS_END */
		if (!host->dma_active)
			usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
		return IRQ_HANDLED;
	}

	if (!cmd->error) {
		if (data) {
			if (!data->error) {
				if (host->wait != USDHI6_WAIT_FOR_STOP &&
				    host->mrq->stop &&
				    !host->mrq->stop->error &&
				    !usdhi6_stop_cmd(host)) {
					/* Sending STOP */
					usdhi6_wait_for_resp(host);

					schedule_delayed_work(&host->timeout_work,
							      host->timeout);

					return IRQ_HANDLED;
				}

				data->bytes_xfered = data->blocks * data->blksz;
			} else {
				/* Data error: might need to unmap the last page */
				dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n",
					 __func__, data->error);
				usdhi6_sg_unmap(host, true);
			}
		} else if (cmd->opcode == MMC_APP_CMD) {
			host->app_cmd = true;
		}
	}

	usdhi6_request_done(host);

	return IRQ_HANDLED;
}

static irqreturn_t usdhi6_sd(int irq, void *dev_id)
{
	struct usdhi6_host *host = dev_id;
	u16 status, status2, error;

	status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
		~USDHI6_SD_INFO1_CARD;
	status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask;

	usdhi6_only_cd(host);

	dev_dbg(mmc_dev(host->mmc),
		"IRQ status = 0x%08x, status2 = 0x%08x\n", status, status2);

	if (!status && !status2)
		return IRQ_NONE;

	error = status2 & USDHI6_SD_INFO2_ERR;

	/* Ack / clear interrupts */
	if (USDHI6_SD_INFO1_IRQ & status)
		usdhi6_write(host, USDHI6_SD_INFO1,
			     0xffff & ~(USDHI6_SD_INFO1_IRQ & status));

	if (USDHI6_SD_INFO2_IRQ & status2) {
		if (error)
			/* In error cases BWE and BRE aren't cleared automatically */
			status2 |= USDHI6_SD_INFO2_BWE | USDHI6_SD_INFO2_BRE;

		usdhi6_write(host, USDHI6_SD_INFO2,
			     0xffff & ~(USDHI6_SD_INFO2_IRQ & status2));
	}

	host->io_error = error;
	host->irq_status = status;

	if (error) {
		/* Don't pollute the log with unsupported command timeouts */
		if (host->wait != USDHI6_WAIT_FOR_CMD ||
		    error != USDHI6_SD_INFO2_RSP_TOUT)
			dev_warn(mmc_dev(host->mmc),
				 "%s(): INFO2 error bits 0x%08x\n",
				 __func__, error);
		else
			dev_dbg(mmc_dev(host->mmc),
				"%s(): INFO2 error bits 0x%08x\n",
				__func__, error);
	}

	return IRQ_WAKE_THREAD;
}
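/*
 * usdhi6_sd() above is the hard-IRQ half of a threaded pair (see
 * devm_request_threaded_irq() in usdhi6_probe()): it latches the event bits
 * into host->irq_status / host->io_error, acks them in hardware and returns
 * IRQ_WAKE_THREAD, deferring everything that may sleep (kmap, DMA unmapping,
 * mmc_request_done()) to usdhi6_sd_bh().
 */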
static irqreturn_t usdhi6_sdio(int irq, void *dev_id)
{
	struct usdhi6_host *host = dev_id;
	u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask;

	dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status);

	if (!status)
		return IRQ_NONE;

	usdhi6_write(host, USDHI6_SDIO_INFO1, ~status);

	mmc_signal_sdio_irq(host->mmc);

	return IRQ_HANDLED;
}

static irqreturn_t usdhi6_cd(int irq, void *dev_id)
{
	struct usdhi6_host *host = dev_id;
	struct mmc_host *mmc = host->mmc;
	u16 status;

	/* We're only interested in hotplug events here */
	status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
		USDHI6_SD_INFO1_CARD;

	if (!status)
		return IRQ_NONE;

	/* Ack */
	usdhi6_write(host, USDHI6_SD_INFO1, ~status);

	if (!work_pending(&mmc->detect.work) &&
	    (((status & USDHI6_SD_INFO1_CARD_INSERT) &&
	      !mmc->card) ||
	     ((status & USDHI6_SD_INFO1_CARD_EJECT) &&
	      mmc->card)))
		mmc_detect_change(mmc, msecs_to_jiffies(100));

	return IRQ_HANDLED;
}

/*
 * Actually this should not be needed, if the built-in timeout works reliably
 * in both PIO cases and DMA never fails. But if DMA does fail, a timeout
 * handler might be the only way to catch the error.
 */
static void usdhi6_timeout_work(struct work_struct *work)
{
	struct delayed_work *d = container_of(work, struct delayed_work, work);
	struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data = mrq ? mrq->data : NULL;

	dev_warn(mmc_dev(host->mmc),
		 "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
		 host->dma_active ? "DMA" : "PIO",
		 host->wait, mrq ? mrq->cmd->opcode : -1,
		 usdhi6_read(host, USDHI6_SD_INFO1),
		 usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status);

	if (host->dma_active) {
		usdhi6_dma_kill(host);
		usdhi6_dma_stop_unmap(host);
	}

	switch (host->wait) {
	default:
		dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
		/* mrq can be NULL in this actually impossible case */
	case USDHI6_WAIT_FOR_CMD:
		usdhi6_error_code(host);
		if (mrq)
			mrq->cmd->error = -ETIMEDOUT;
		break;
	case USDHI6_WAIT_FOR_STOP:
		usdhi6_error_code(host);
		mrq->stop->error = -ETIMEDOUT;
		break;
	case USDHI6_WAIT_FOR_DMA:
	case USDHI6_WAIT_FOR_MREAD:
	case USDHI6_WAIT_FOR_MWRITE:
	case USDHI6_WAIT_FOR_READ:
	case USDHI6_WAIT_FOR_WRITE:
		dev_dbg(mmc_dev(host->mmc),
			"%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
			data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
			host->offset, data->blocks, data->blksz, data->sg_len,
			sg_dma_len(host->sg), host->sg->offset);
		usdhi6_sg_unmap(host, true);
		/*
		 * If USDHI6_WAIT_FOR_DATA_END times out, we have already
		 * unmapped the page
		 */
	case USDHI6_WAIT_FOR_DATA_END:
		usdhi6_error_code(host);
		data->error = -ETIMEDOUT;
	}

	if (mrq)
		usdhi6_request_done(host);
}

/* Probe / release */

static const struct of_device_id usdhi6_of_match[] = {
	{.compatible = "renesas,usdhi6rol0"},
	{}
};
MODULE_DEVICE_TABLE(of, usdhi6_of_match);
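/*
 * A hypothetical DT node for this controller, inferred from the compatible
 * string above and the interrupt names requested in usdhi6_probe(); the
 * address, interrupt specifiers and clock phandle are made up:
 *
 *	sdhi: sd@ab000000 {
 *		compatible = "renesas,usdhi6rol0";
 *		reg = <0xab000000 0x200>;
 *		interrupts = <0 23 4>, <0 24 4>, <0 25 4>;
 *		interrupt-names = "card detect", "data", "SDIO";
 *		clocks = <&sd_clk>;
 *	};
 */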
We could 1791 * check, whether we managed to get DMA and fall back to 1 page 1792 * segments, but if we do manage to obtain DMA and then it fails at 1793 * run-time and we fall back to PIO, we will continue getting large 1794 * segments. So, we wouldn't be able to get rid of the code anyway. 1795 */ 1796 mmc->max_seg_size = mmc->max_req_size; 1797 if (!mmc->f_max) 1798 mmc->f_max = host->imclk; 1799 mmc->f_min = host->imclk / 512; 1800 1801 platform_set_drvdata(pdev, host); 1802 1803 ret = mmc_add_host(mmc); 1804 if (ret < 0) 1805 goto e_clk_off; 1806 1807 return 0; 1808 1809e_clk_off: 1810 clk_disable_unprepare(host->clk); 1811e_free_mmc: 1812 mmc_free_host(mmc); 1813 1814 return ret; 1815} 1816 1817static int usdhi6_remove(struct platform_device *pdev) 1818{ 1819 struct usdhi6_host *host = platform_get_drvdata(pdev); 1820 1821 mmc_remove_host(host->mmc); 1822 1823 usdhi6_mask_all(host); 1824 cancel_delayed_work_sync(&host->timeout_work); 1825 usdhi6_dma_release(host); 1826 clk_disable_unprepare(host->clk); 1827 mmc_free_host(host->mmc); 1828 1829 return 0; 1830} 1831 1832static struct platform_driver usdhi6_driver = { 1833 .probe = usdhi6_probe, 1834 .remove = usdhi6_remove, 1835 .driver = { 1836 .name = "usdhi6rol0", 1837 .of_match_table = usdhi6_of_match, 1838 }, 1839}; 1840 1841module_platform_driver(usdhi6_driver); 1842 1843MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver"); 1844MODULE_LICENSE("GPL v2"); 1845MODULE_ALIAS("platform:usdhi6rol0"); 1846MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); 1847