/linux-4.1.27/drivers/mmc/host/: cross-reference hits for "host" in three MMC host drivers, tmio_mmc_dma.c, s3cmci.c and omap.c.
tmio_mmc_dma.c
dmaengine-based DMA glue for TMIO-derived MMC controllers. The matched lines fall into:
- tmio_mmc_enable_dma() and tmio_mmc_abort_dma() (26-45): gate DMA through the platform enable() hook and, on abort, run dmaengine_terminate_all() on both channels before re-enabling.
- tmio_mmc_start_dma_rx() and tmio_mmc_start_dma_tx() (47-201): check every scatterlist entry against the controller's alignment mask ((1 << host->pdata->alignment_shift) - 1); fall back to PIO (host->force_pio) or to the single bounce buffer (host->bounce_sg / host->bounce_buf) when the list is unaligned, longer than one entry or larger than a page, then dma_map_sg() the list, build and submit the descriptor, and release the channel with a dev_warn() if any step fails.
- tmio_mmc_start_dma() (203-211): dispatches to the RX or TX path according to the data direction.
- tmio_mmc_issue_tasklet_fn() and tmio_mmc_tasklet_fn() (217-257): under host->lock, kick the channel that carries the current request and, on completion, dma_unmap_sg() the list and hand off to tmio_mmc_do_data_irq().
- tmio_mmc_request_dma() and tmio_mmc_release_dma() (260-354): obtain "tx"/"rx" slave channels with dma_request_slave_channel_compat(), program dmaengine_slave_config() with the data-port address (CTL_SD_DATA_PORT shifted by host->bus_shift, plus dma_rx_offset on the receive side), allocate a GFP_DMA bounce page, initialise the completion and issue tasklets, and undo all of it on release.
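The channel-acquisition half of tmio_mmc_request_dma() is a standard dmaengine client pattern. Below is a minimal sketch of it; the my_mmc_* names and the MY_DATA_PORT offset are illustrative placeholders, not the driver's real symbols, and error handling is reduced to the essentials.

	/*
	 * Sketch of the channel-acquisition pattern seen in
	 * tmio_mmc_request_dma(): ask for named "tx"/"rx" slave channels
	 * (device tree) with a platform filter fallback, then point each
	 * channel at the controller's data port.
	 */
	#include <linux/dmaengine.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	#define MY_DATA_PORT	0x30	/* hypothetical data-port register offset */

	struct my_mmc_host {
		struct platform_device *pdev;
		struct dma_chan *chan_tx;
		struct dma_chan *chan_rx;
	};

	static int my_mmc_request_dma(struct my_mmc_host *host, struct resource *res,
				      dma_filter_fn filter, void *tx_param, void *rx_param)
	{
		struct dma_slave_config cfg = { };
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* prefer DT-described channels, fall back to the filter function */
		host->chan_tx = dma_request_slave_channel_compat(mask, filter, tx_param,
								 &host->pdev->dev, "tx");
		if (!host->chan_tx)
			return -ENODEV;

		cfg.direction = DMA_MEM_TO_DEV;
		cfg.dst_addr = res->start + MY_DATA_PORT;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		if (dmaengine_slave_config(host->chan_tx, &cfg))
			goto err_tx;

		host->chan_rx = dma_request_slave_channel_compat(mask, filter, rx_param,
								 &host->pdev->dev, "rx");
		if (!host->chan_rx)
			goto err_tx;

		cfg.direction = DMA_DEV_TO_MEM;
		cfg.src_addr = cfg.dst_addr;	/* RX reads the same data port */
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		if (dmaengine_slave_config(host->chan_rx, &cfg))
			goto err_rx;

		return 0;

	err_rx:
		dma_release_channel(host->chan_rx);
		host->chan_rx = NULL;
	err_tx:
		dma_release_channel(host->chan_tx);
		host->chan_tx = NULL;
		return -ENODEV;
	}

Requesting named channels first lets device-tree platforms describe the DMA wiring, while the filter/parameter pair keeps legacy board files working; that dual lookup is the whole point of the _compat helper.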
s3cmci.c
Samsung S3C24xx SD/MMC host driver. The first group of matches covers the debug helpers, the PIO data path and the interrupt handler:
- dbg() macro, dbg_dumpregs(), prepare_dbgmsg(), dbg_dumpcmd() (135-241): per-host debug output; dump SDICON/SDIPRE/SDICMD*/SDIRSP*/SDIDCON/SDIFSTA and the last command/data descriptors kept in host->dbgmsg_cmd and host->dbgmsg_dat.
- s3cmci_host_usedma(), enable_imask()/disable_imask()/clear_imask(), s3cmci_check_sdio_irq() (253-313): DMA-versus-PIO selection, read-modify-write of the interrupt mask at host->sdiimsk, and SDIO IRQ forwarding via mmc_signal_sdio_irq().
- get_data_buffer(), fifo_count(), fifo_free() (317-356): advance host->pio_sgptr through the request scatterlist and read the FIFO fill level from S3C2410_SDIFSTA.
- s3cmci_enable_irq() and s3cmci_disable_irq() (375-414): track host->irq_state/irq_enabled/irq_disabled and call enable_irq()/disable_irq() accordingly.
- do_pio_read(), do_pio_write(), pio_tasklet() (420-580): move data between the scatterlist and the SDIDATA FIFO, re-arm the RX/TX half-full interrupts, and finalize the request when COMPLETION_FINALIZE is reached; a sketch of the read loop follows below.
- s3cmci_irq() (611-805): the main interrupt handler; under host->complete_lock it decodes SDICMDSTAT/SDIDSTA/SDIFSTA, schedules the PIO tasklet for FIFO service, and records command timeout, response and data CRC, FIFO failure and data timeout errors in host->status before marking the request for finalization.
- s3cmci_irq_cd() and s3cmci_dma_done_callback() (816-841): card-detect debounce through mmc_detect_change() and hand-off of DMA completion to the tasklet.
The second group covers request completion, data setup, clocking and board plumbing:
- finalize_request() (845-937): read back the SDIRSP0..3 response words, restore the prescaler, dump diagnostics on error, issue the stop command when needed (host->cmd_is_stop), terminate DMA with dmaengine_terminate_all(), reset the FIFO on 2440 parts, then clear host->mrq and call mmc_request_done().
- s3cmci_send_command() and s3cmci_setup_data() (940-1060): choose the completion state (host->complete_what), write SDICMDARG/SDICMDCON, and program SDIDCON/SDIBSIZE/SDITIMER for the data phase, resetting the controller if a previous transfer is still active.
- s3cmci_prepare_pio() and s3cmci_prepare_dma() (1064-1124): either prime the PIO state (host->pio_*) and unmask the FIFO interrupts, or dma_map_sg() the request and hand it to dmaengine_prep_slave_sg() with the FIFO address host->mem->start + host->sdidata.
- s3cmci_send_request(), s3cmci_card_present(), s3cmci_request() (1131-1204): clear the status registers, set up data and command, and fail fast with -ENOMEDIUM when no card is present.
- s3cmci_set_clk(), s3cmci_set_ios(), s3cmci_reset(), s3cmci_get_ro(), s3cmci_enable_sdio_irq() (1210-1351): derive the SDIPRE prescaler from host->clk_rate and host->clk_div (a worked sketch follows below), drive the board's set_power() callback, bus width, soft reset and SDIO IRQ masking.
- cpufreq notifier and debugfs support (1374-1545): re-tune the clock across CPU frequency transitions and expose the host state and register file under debugfs.
- s3cmci_probe(), s3cmci_shutdown(), s3cmci_remove() (1551-1853): map the MMIO region, claim the SDI interrupt, the card-detect and write-protect GPIOs, an optional DMA channel and the "sdi" clock, register the mmc host, and unwind all of it on the error paths and at removal.
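s3cmci_set_clk() searches for the smallest prescaler that does not overshoot the requested rate, using real_rate = clk_rate / (clk_div * (psc + 1)). A worked sketch of that search follows; the formula comes from the excerpt, while the function name and the assumed 8-bit prescaler field are illustrative.

	/*
	 * Find the smallest divisor that brings the SD clock at or below
	 * target_hz.  clk_rate and clk_div mirror host->clk_rate and
	 * host->clk_div in the driver.
	 */
	#include <linux/kernel.h>

	static unsigned int pick_prescaler(unsigned long clk_rate,
					   unsigned int clk_div,
					   unsigned int target_hz,
					   unsigned long *real_rate)
	{
		unsigned int psc;

		/* assume an 8-bit prescaler field: divider = clk_div * (psc + 1) */
		for (psc = 0; psc < 256; psc++) {
			*real_rate = clk_rate / (clk_div * (psc + 1));
			if (*real_rate <= target_hz)
				return psc;
		}

		/* even the largest divisor is too fast; run as slow as possible */
		*real_rate = clk_rate / (clk_div * 256);
		return 255;
	}

For example, with a 48 MHz input clock and clk_div = 1, a 400 kHz request resolves to psc = 119, giving exactly 48 MHz / 120 = 400 kHz.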
omap.c
OMAP1/2 multi-slot MMC host driver (linux/drivers/mmc/host/omap.c). The first group of matches covers slot arbitration, command issue and completion:
- Register accessors and feature tests (76-115): OMAP_MMC_REG/READ/WRITE shift register offsets by host->reg_shift; mmc_omap7xx()/mmc_omap15xx()/mmc_omap16xx() test host->features; each mmc_omap_slot carries a pointer back to its host.
- mmc_omap_fclk_enable(), mmc_omap_select_slot(), mmc_omap_slot_release_work(), mmc_omap_release_slot() (179-297): gate the functional clock under host->clk_lock, serialize slot ownership on host->slot_lock and host->slot_wq, restore the slot's saved CON value on a switch, and queue the next waiting slot's request on host->mmc_omap_wq.
- mmc_omap_start_command() (335-401): build the CMD register value from the response type, busy flag and open-drain bus mode, arm host->cmd_abort_timer, and write CTO/ARGL/ARGH/IE/CMD.
- mmc_omap_release_dma(), mmc_omap_send_stop_work(), mmc_omap_xfer_done(), mmc_omap_send_abort(), mmc_omap_abort_xfer() (405-510): unmap the scatterlist on the TX or RX channel, complete the request or queue the stop command, and poll STAT for up to maxloops iterations while sending the abort command.
- mmc_omap_end_of_data(), mmc_omap_dma_done(), mmc_omap_cmd_done(), mmc_omap_abort_command(), mmc_omap_cmd_timer() (514-641): reconcile the block-received and DMA-done halves under host->dma_lock, read the response out of the 16-bit RSP register pairs (a sketch follows below), and time out stuck commands through host->cmd_abort_work.
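mmc_omap_cmd_done() reassembles the command response from pairs of 16-bit RSP registers, low half first: R2 (136-bit) responses span RSP0..RSP7 with resp[0] taken from the highest pair, and short responses live in RSP6/RSP7. A small sketch of that unpacking; rsp_read() is a hypothetical accessor standing in for OMAP_MMC_READ(host, RSPn).

	#include <linux/mmc/core.h>
	#include <linux/types.h>

	static void unpack_response(struct mmc_command *cmd,
				    u16 (*rsp_read)(void *ctx, int reg), void *ctx)
	{
		if (cmd->flags & MMC_RSP_136) {
			/* 128 bits spread over RSP0..RSP7, resp[0] is the MSW */
			cmd->resp[3] = rsp_read(ctx, 0) | (rsp_read(ctx, 1) << 16);
			cmd->resp[2] = rsp_read(ctx, 2) | (rsp_read(ctx, 3) << 16);
			cmd->resp[1] = rsp_read(ctx, 4) | (rsp_read(ctx, 5) << 16);
			cmd->resp[0] = rsp_read(ctx, 6) | (rsp_read(ctx, 7) << 16);
		} else if (cmd->flags & MMC_RSP_PRESENT) {
			/* short responses land in the last register pair */
			cmd->resp[0] = rsp_read(ctx, 6) | (rsp_read(ctx, 7) << 16);
		}
	}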
The second group covers the PIO data path, the interrupt handler, DMA preparation and probing:
- mmc_omap_sg_to_buf(), mmc_omap_clk_timer(), mmc_omap_xfer_data() (646-695): track the current scatterlist segment in host->buffer/buffer_bytes_left and move 16-bit words through the DATA register with __raw_writesw()/__raw_readsw().
- mmc_omap_report_irq() and mmc_omap_irq() (699-851): decode the STAT bits, service PIO FIFO interrupts, flag data and command timeouts and CRC errors, and route completion through cmd_done/xfer_done/end_of_data, disabling the IRQ and queueing cmd_abort_work when a command error races with an active data phase.
- omap_mmc_notify_cover_event() and mmc_omap_dma_callback() (860-912): per-slot cover-switch notification and the dmaengine completion callback.
- set_cmd_timeout(), set_data_timeout(), mmc_omap_prepare_data() (915-1062): program SDIO/CTO/DTO from the slot's fclk rate, then either map the scatterlist and build a slave-DMA descriptor on host->dma_tx/dma_rx (falling back to PIO if mapping or channel setup fails) or set up the PIO byte counters; see the DMA submission sketch after this list.
- mmc_omap_start_request(), mmc_omap_request(), mmc_omap_set_power(), mmc_omap_calc_divisor(), mmc_omap_set_ios() (1065-1209): issue the command (plus dma_async_issue_pending() when DMA is in use), arbitrate the slot, toggle the power bit in CON and derive the clock divisor from clk_get_rate(host->fclk).
- mmc_omap_new_slot(), mmc_omap_remove_slot(), mmc_omap_probe() (1221-1365): allocate one mmc_host per slot, wire bus width and f_max from platform data, ioremap the controller, and initialise the work items, timers, spinlocks and wait queue on the shared host structure.
1366 host->features = host->pdata->slots[0].features; mmc_omap_probe() 1367 host->dev = &pdev->dev; mmc_omap_probe() 1368 platform_set_drvdata(pdev, host); mmc_omap_probe() 1370 host->id = pdev->id; mmc_omap_probe() 1371 host->irq = irq; mmc_omap_probe() 1372 host->phys_base = res->start; mmc_omap_probe() 1373 host->iclk = clk_get(&pdev->dev, "ick"); mmc_omap_probe() 1374 if (IS_ERR(host->iclk)) mmc_omap_probe() 1375 return PTR_ERR(host->iclk); mmc_omap_probe() 1376 clk_enable(host->iclk); mmc_omap_probe() 1378 host->fclk = clk_get(&pdev->dev, "fck"); mmc_omap_probe() 1379 if (IS_ERR(host->fclk)) { mmc_omap_probe() 1380 ret = PTR_ERR(host->fclk); mmc_omap_probe() 1387 host->dma_tx_burst = -1; mmc_omap_probe() 1388 host->dma_rx_burst = -1; mmc_omap_probe() 1393 host->dma_tx = dma_request_slave_channel_compat(mask, mmc_omap_probe() 1395 if (!host->dma_tx) mmc_omap_probe() 1396 dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n", mmc_omap_probe() 1402 host->dma_rx = dma_request_slave_channel_compat(mask, mmc_omap_probe() 1404 if (!host->dma_rx) mmc_omap_probe() 1405 dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n", mmc_omap_probe() 1408 ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host); mmc_omap_probe() 1418 host->nr_slots = pdata->nr_slots; mmc_omap_probe() 1419 host->reg_shift = (mmc_omap7xx() ? 1 : 2); mmc_omap_probe() 1421 host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0); mmc_omap_probe() 1422 if (!host->mmc_omap_wq) mmc_omap_probe() 1426 ret = mmc_omap_new_slot(host, i); mmc_omap_probe() 1429 mmc_omap_remove_slot(host->slots[i]); mmc_omap_probe() 1438 destroy_workqueue(host->mmc_omap_wq); mmc_omap_probe() 1443 free_irq(host->irq, host); mmc_omap_probe() 1445 if (host->dma_tx) mmc_omap_probe() 1446 dma_release_channel(host->dma_tx); mmc_omap_probe() 1447 if (host->dma_rx) mmc_omap_probe() 1448 dma_release_channel(host->dma_rx); mmc_omap_probe() 1449 clk_put(host->fclk); mmc_omap_probe() 1451 clk_disable(host->iclk); mmc_omap_probe() 1452 clk_put(host->iclk); mmc_omap_probe() 1458 struct mmc_omap_host *host = platform_get_drvdata(pdev); mmc_omap_remove() local 1461 BUG_ON(host == NULL); mmc_omap_remove() 1463 for (i = 0; i < host->nr_slots; i++) mmc_omap_remove() 1464 mmc_omap_remove_slot(host->slots[i]); mmc_omap_remove() 1466 if (host->pdata->cleanup) mmc_omap_remove() 1467 host->pdata->cleanup(&pdev->dev); mmc_omap_remove() 1469 mmc_omap_fclk_enable(host, 0); mmc_omap_remove() 1470 free_irq(host->irq, host); mmc_omap_remove() 1471 clk_put(host->fclk); mmc_omap_remove() 1472 clk_disable(host->iclk); mmc_omap_remove() 1473 clk_put(host->iclk); mmc_omap_remove() 1475 if (host->dma_tx) mmc_omap_remove() 1476 dma_release_channel(host->dma_tx); mmc_omap_remove() 1477 if (host->dma_rx) mmc_omap_remove() 1478 dma_release_channel(host->dma_rx); mmc_omap_remove() 1480 destroy_workqueue(host->mmc_omap_wq); mmc_omap_remove()
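The set_data_timeout() helper shown earlier in this omap listing boils down to integer arithmetic on the functional-clock period. Below is a minimal user-space sketch of that calculation; the struct, the helper name, the 16-bit DTO limit and the /1024 "timeout multiplier" step are assumptions about the parts of the function elided in the dump, not the driver's API.

/*
 * Sketch of the data-timeout arithmetic: convert the request's
 * timeout_ns / timeout_clks budget into functional-clock cycles and
 * fall back to a coarse prescaler when the count no longer fits in a
 * 16-bit timeout register.  Names and bit semantics are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dto_setting {
	bool     use_prescaler;	/* models the "timeout multiplier" control bit */
	uint16_t dto_cycles;	/* value that would be programmed into DTO */
};

static struct dto_setting calc_data_timeout(unsigned int fclk_hz,
					    unsigned int timeout_ns,
					    unsigned int timeout_clks)
{
	struct dto_setting s = { false, 0 };
	unsigned int cycle_ns = 1000000000u / fclk_hz;
	unsigned int timeout = timeout_ns / cycle_ns + timeout_clks;

	if (timeout > 0xffff) {		/* does not fit: assume a /1024 prescaler */
		s.use_prescaler = true;
		timeout /= 1024;
	}
	s.dto_cycles = (uint16_t)timeout;
	return s;
}

int main(void)
{
	/* e.g. a 24 MHz functional clock and a 100 ms card timeout */
	struct dto_setting s = calc_data_timeout(24000000, 100000000, 0);

	printf("prescaler=%d cycles=%u\n", s.use_prescaler, (unsigned)s.dto_cycles);
	return 0;
}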
H A D | sdhci.c | 2 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver 29 #include <linux/mmc/host.h> 56 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); 57 static int sdhci_pre_dma_transfer(struct sdhci_host *host, 59 static int sdhci_do_get_cd(struct sdhci_host *host); 62 static int sdhci_runtime_pm_get(struct sdhci_host *host); 63 static int sdhci_runtime_pm_put(struct sdhci_host *host); 64 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host); 65 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host); 67 static inline int sdhci_runtime_pm_get(struct sdhci_host *host) sdhci_runtime_pm_get() argument 71 static inline int sdhci_runtime_pm_put(struct sdhci_host *host) sdhci_runtime_pm_put() argument 75 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) sdhci_runtime_pm_bus_on() argument 78 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) sdhci_runtime_pm_bus_off() argument 83 static void sdhci_dumpregs(struct sdhci_host *host) sdhci_dumpregs() argument 86 mmc_hostname(host->mmc)); sdhci_dumpregs() 89 sdhci_readl(host, SDHCI_DMA_ADDRESS), sdhci_dumpregs() 90 sdhci_readw(host, SDHCI_HOST_VERSION)); sdhci_dumpregs() 92 sdhci_readw(host, SDHCI_BLOCK_SIZE), sdhci_dumpregs() 93 sdhci_readw(host, SDHCI_BLOCK_COUNT)); sdhci_dumpregs() 95 sdhci_readl(host, SDHCI_ARGUMENT), sdhci_dumpregs() 96 sdhci_readw(host, SDHCI_TRANSFER_MODE)); sdhci_dumpregs() 98 sdhci_readl(host, SDHCI_PRESENT_STATE), sdhci_dumpregs() 99 sdhci_readb(host, SDHCI_HOST_CONTROL)); sdhci_dumpregs() 101 sdhci_readb(host, SDHCI_POWER_CONTROL), sdhci_dumpregs() 102 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); sdhci_dumpregs() 104 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), sdhci_dumpregs() 105 sdhci_readw(host, SDHCI_CLOCK_CONTROL)); sdhci_dumpregs() 107 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), sdhci_dumpregs() 108 sdhci_readl(host, SDHCI_INT_STATUS)); sdhci_dumpregs() 110 sdhci_readl(host, SDHCI_INT_ENABLE), sdhci_dumpregs() 111 sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); sdhci_dumpregs() 113 sdhci_readw(host, SDHCI_ACMD12_ERR), sdhci_dumpregs() 114 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); sdhci_dumpregs() 116 sdhci_readl(host, SDHCI_CAPABILITIES), sdhci_dumpregs() 117 sdhci_readl(host, SDHCI_CAPABILITIES_1)); sdhci_dumpregs() 119 sdhci_readw(host, SDHCI_COMMAND), sdhci_dumpregs() 120 sdhci_readl(host, SDHCI_MAX_CURRENT)); sdhci_dumpregs() 122 sdhci_readw(host, SDHCI_HOST_CONTROL2)); sdhci_dumpregs() 124 if (host->flags & SDHCI_USE_ADMA) { sdhci_dumpregs() 125 if (host->flags & SDHCI_USE_64_BIT_DMA) sdhci_dumpregs() 127 readl(host->ioaddr + SDHCI_ADMA_ERROR), sdhci_dumpregs() 128 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI), sdhci_dumpregs() 129 readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); sdhci_dumpregs() 132 readl(host->ioaddr + SDHCI_ADMA_ERROR), sdhci_dumpregs() 133 readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); sdhci_dumpregs() 145 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) sdhci_set_card_detection() argument 149 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || sdhci_set_card_detection() 150 (host->mmc->caps & MMC_CAP_NONREMOVABLE)) sdhci_set_card_detection() 154 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & sdhci_set_card_detection() 157 host->ier |= present ? 
SDHCI_INT_CARD_REMOVE : sdhci_set_card_detection() 160 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); sdhci_set_card_detection() 163 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_set_card_detection() 164 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); sdhci_set_card_detection() 167 static void sdhci_enable_card_detection(struct sdhci_host *host) sdhci_enable_card_detection() argument 169 sdhci_set_card_detection(host, true); sdhci_enable_card_detection() 172 static void sdhci_disable_card_detection(struct sdhci_host *host) sdhci_disable_card_detection() argument 174 sdhci_set_card_detection(host, false); sdhci_disable_card_detection() 177 void sdhci_reset(struct sdhci_host *host, u8 mask) sdhci_reset() argument 181 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); sdhci_reset() 184 host->clock = 0; sdhci_reset() 186 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) sdhci_reset() 187 sdhci_runtime_pm_bus_off(host); sdhci_reset() 194 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) { sdhci_reset() 197 mmc_hostname(host->mmc), (int)mask); sdhci_reset() 198 sdhci_dumpregs(host); sdhci_reset() 207 static void sdhci_do_reset(struct sdhci_host *host, u8 mask) sdhci_do_reset() argument 209 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { sdhci_do_reset() 210 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & sdhci_do_reset() 215 host->ops->reset(host, mask); sdhci_do_reset() 218 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { sdhci_do_reset() 219 if (host->ops->enable_dma) sdhci_do_reset() 220 host->ops->enable_dma(host); sdhci_do_reset() 224 host->preset_enabled = false; sdhci_do_reset() 230 static void sdhci_init(struct sdhci_host *host, int soft) sdhci_init() argument 233 sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA); sdhci_init() 235 sdhci_do_reset(host, SDHCI_RESET_ALL); sdhci_init() 237 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | sdhci_init() 243 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_init() 244 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); sdhci_init() 248 host->clock = 0; sdhci_init() 249 sdhci_set_ios(host->mmc, &host->mmc->ios); sdhci_init() 253 static void sdhci_reinit(struct sdhci_host *host) sdhci_reinit() argument 255 sdhci_init(host, 0); sdhci_reinit() 261 if (host->flags & SDHCI_USING_RETUNING_TIMER) { sdhci_reinit() 262 host->flags &= ~SDHCI_USING_RETUNING_TIMER; sdhci_reinit() 264 del_timer_sync(&host->tuning_timer); sdhci_reinit() 265 host->flags &= ~SDHCI_NEEDS_RETUNING; sdhci_reinit() 267 sdhci_enable_card_detection(host); sdhci_reinit() 270 static void sdhci_activate_led(struct sdhci_host *host) sdhci_activate_led() argument 274 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); sdhci_activate_led() 276 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); sdhci_activate_led() 279 static void sdhci_deactivate_led(struct sdhci_host *host) sdhci_deactivate_led() argument 283 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); sdhci_deactivate_led() 285 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); sdhci_deactivate_led() 292 struct sdhci_host *host = container_of(led, struct sdhci_host, led); sdhci_led_control() local 295 spin_lock_irqsave(&host->lock, flags); sdhci_led_control() 297 if (host->runtime_suspended) sdhci_led_control() 301 sdhci_deactivate_led(host); sdhci_led_control() 303 sdhci_activate_led(host); sdhci_led_control() 305 spin_unlock_irqrestore(&host->lock, flags); sdhci_led_control() 315 static void sdhci_read_block_pio(struct sdhci_host *host) sdhci_read_block_pio() argument 324 blksize = 
host->data->blksz; sdhci_read_block_pio() 330 if (!sg_miter_next(&host->sg_miter)) sdhci_read_block_pio() 333 len = min(host->sg_miter.length, blksize); sdhci_read_block_pio() 336 host->sg_miter.consumed = len; sdhci_read_block_pio() 338 buf = host->sg_miter.addr; sdhci_read_block_pio() 342 scratch = sdhci_readl(host, SDHCI_BUFFER); sdhci_read_block_pio() 355 sg_miter_stop(&host->sg_miter); sdhci_read_block_pio() 360 static void sdhci_write_block_pio(struct sdhci_host *host) sdhci_write_block_pio() argument 369 blksize = host->data->blksz; sdhci_write_block_pio() 376 if (!sg_miter_next(&host->sg_miter)) sdhci_write_block_pio() 379 len = min(host->sg_miter.length, blksize); sdhci_write_block_pio() 382 host->sg_miter.consumed = len; sdhci_write_block_pio() 384 buf = host->sg_miter.addr; sdhci_write_block_pio() 394 sdhci_writel(host, scratch, SDHCI_BUFFER); sdhci_write_block_pio() 401 sg_miter_stop(&host->sg_miter); sdhci_write_block_pio() 406 static void sdhci_transfer_pio(struct sdhci_host *host) sdhci_transfer_pio() argument 410 BUG_ON(!host->data); sdhci_transfer_pio() 412 if (host->blocks == 0) sdhci_transfer_pio() 415 if (host->data->flags & MMC_DATA_READ) sdhci_transfer_pio() 425 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) && sdhci_transfer_pio() 426 (host->data->blocks == 1)) sdhci_transfer_pio() 429 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { sdhci_transfer_pio() 430 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY) sdhci_transfer_pio() 433 if (host->data->flags & MMC_DATA_READ) sdhci_transfer_pio() 434 sdhci_read_block_pio(host); sdhci_transfer_pio() 436 sdhci_write_block_pio(host); sdhci_transfer_pio() 438 host->blocks--; sdhci_transfer_pio() 439 if (host->blocks == 0) sdhci_transfer_pio() 458 static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc, sdhci_adma_write_desc() argument 468 if (host->flags & SDHCI_USE_64_BIT_DMA) sdhci_adma_write_desc() 480 static int sdhci_adma_table_pre(struct sdhci_host *host, sdhci_adma_table_pre() argument 506 host->align_addr = dma_map_single(mmc_dev(host->mmc), sdhci_adma_table_pre() 507 host->align_buffer, host->align_buffer_sz, direction); sdhci_adma_table_pre() 508 if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) sdhci_adma_table_pre() 510 BUG_ON(host->align_addr & host->align_mask); sdhci_adma_table_pre() 512 host->sg_count = sdhci_pre_dma_transfer(host, data); sdhci_adma_table_pre() 513 if (host->sg_count < 0) sdhci_adma_table_pre() 516 desc = host->adma_table; sdhci_adma_table_pre() 517 align = host->align_buffer; sdhci_adma_table_pre() 519 align_addr = host->align_addr; sdhci_adma_table_pre() 521 for_each_sg(data->sg, sg, host->sg_count, i) { sdhci_adma_table_pre() 532 offset = (host->align_sz - (addr & host->align_mask)) & sdhci_adma_table_pre() 533 host->align_mask; sdhci_adma_table_pre() 542 sdhci_adma_write_desc(host, desc, align_addr, offset, sdhci_adma_table_pre() 547 align += host->align_sz; sdhci_adma_table_pre() 548 align_addr += host->align_sz; sdhci_adma_table_pre() 550 desc += host->desc_sz; sdhci_adma_table_pre() 560 sdhci_adma_write_desc(host, desc, addr, len, sdhci_adma_table_pre() 562 desc += host->desc_sz; sdhci_adma_table_pre() 569 WARN_ON((desc - host->adma_table) >= host->adma_table_sz); sdhci_adma_table_pre() 572 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { sdhci_adma_table_pre() 576 if (desc != host->adma_table) { sdhci_adma_table_pre() 577 desc -= host->desc_sz; sdhci_adma_table_pre() 586 sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID); 
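sdhci_adma_write_desc() above packs a 16-bit attribute word, a 16-bit length and the DMA address into each descriptor. The following standalone sketch shows the 32-bit ADMA2 layout; the struct name and attribute constants are assumptions chosen to match common sdhci.h definitions and are for illustration only.

/*
 * Sketch of 32-bit ADMA2 descriptor packing, mirroring what
 * sdhci_adma_write_desc() does in the listing above.
 */
#include <stdint.h>
#include <stdio.h>

#define ADMA2_TRAN_VALID	0x21	/* Act = Tran, Valid */
#define ADMA2_NOP_END_VALID	0x03	/* Act = Nop, End, Valid */

struct adma2_desc32 {
	uint16_t cmd;	/* attribute bits */
	uint16_t len;	/* transfer length in bytes (0 means 65536) */
	uint32_t addr;	/* 32-bit DMA address */
} __attribute__((packed));

static void adma2_write_desc(struct adma2_desc32 *desc, uint32_t addr,
			     uint16_t len, uint16_t cmd)
{
	/* on real hardware these fields are little-endian */
	desc->cmd  = cmd;
	desc->len  = len;
	desc->addr = addr;
}

int main(void)
{
	struct adma2_desc32 table[2];

	/* one data transfer followed by the terminating NOP descriptor */
	adma2_write_desc(&table[0], 0x10000000u, 512, ADMA2_TRAN_VALID);
	adma2_write_desc(&table[1], 0, 0, ADMA2_NOP_END_VALID);

	printf("desc0: cmd=0x%02x len=%u addr=0x%08x\n",
	       (unsigned)table[0].cmd, (unsigned)table[0].len,
	       (unsigned)table[0].addr);
	return 0;
}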
sdhci_adma_table_pre() 593 dma_sync_single_for_device(mmc_dev(host->mmc), sdhci_adma_table_pre() 594 host->align_addr, host->align_buffer_sz, direction); sdhci_adma_table_pre() 600 dma_unmap_single(mmc_dev(host->mmc), host->align_addr, sdhci_adma_table_pre() 601 host->align_buffer_sz, direction); sdhci_adma_table_pre() 606 static void sdhci_adma_table_post(struct sdhci_host *host, sdhci_adma_table_post() argument 623 dma_unmap_single(mmc_dev(host->mmc), host->align_addr, sdhci_adma_table_post() 624 host->align_buffer_sz, direction); sdhci_adma_table_post() 628 for_each_sg(data->sg, sg, host->sg_count, i) sdhci_adma_table_post() 629 if (sg_dma_address(sg) & host->align_mask) { sdhci_adma_table_post() 635 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, sdhci_adma_table_post() 638 align = host->align_buffer; sdhci_adma_table_post() 640 for_each_sg(data->sg, sg, host->sg_count, i) { sdhci_adma_table_post() 641 if (sg_dma_address(sg) & host->align_mask) { sdhci_adma_table_post() 642 size = host->align_sz - sdhci_adma_table_post() 643 (sg_dma_address(sg) & host->align_mask); sdhci_adma_table_post() 649 align += host->align_sz; sdhci_adma_table_post() 655 dma_unmap_sg(mmc_dev(host->mmc), data->sg, sdhci_adma_table_post() 661 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) sdhci_calc_timeout() argument 668 * If the host controller provides us with an incorrect timeout sdhci_calc_timeout() 673 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) sdhci_calc_timeout() 685 if (host->clock && data->timeout_clks) { sdhci_calc_timeout() 690 * host->clock is in Hz. target_timeout is in us. sdhci_calc_timeout() 694 if (do_div(val, host->clock)) sdhci_calc_timeout() 706 * (2) host->timeout_clk < 2^16 sdhci_calc_timeout() 711 current_timeout = (1 << 13) * 1000 / host->timeout_clk; sdhci_calc_timeout() 721 mmc_hostname(host->mmc), count, cmd->opcode); sdhci_calc_timeout() 728 static void sdhci_set_transfer_irqs(struct sdhci_host *host) sdhci_set_transfer_irqs() argument 733 if (host->flags & SDHCI_REQ_USE_DMA) sdhci_set_transfer_irqs() 734 host->ier = (host->ier & ~pio_irqs) | dma_irqs; sdhci_set_transfer_irqs() 736 host->ier = (host->ier & ~dma_irqs) | pio_irqs; sdhci_set_transfer_irqs() 738 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_set_transfer_irqs() 739 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); sdhci_set_transfer_irqs() 742 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) sdhci_set_timeout() argument 746 if (host->ops->set_timeout) { sdhci_set_timeout() 747 host->ops->set_timeout(host, cmd); sdhci_set_timeout() 749 count = sdhci_calc_timeout(host, cmd); sdhci_set_timeout() 750 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); sdhci_set_timeout() 754 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) sdhci_prepare_data() argument 760 WARN_ON(host->data); sdhci_prepare_data() 763 sdhci_set_timeout(host, cmd); sdhci_prepare_data() 770 BUG_ON(data->blksz > host->mmc->max_blk_size); sdhci_prepare_data() 773 host->data = data; sdhci_prepare_data() 774 host->data_early = 0; sdhci_prepare_data() 775 host->data->bytes_xfered = 0; sdhci_prepare_data() 777 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) sdhci_prepare_data() 778 host->flags |= SDHCI_REQ_USE_DMA; sdhci_prepare_data() 784 if (host->flags & SDHCI_REQ_USE_DMA) { sdhci_prepare_data() 789 if (host->flags & SDHCI_USE_ADMA) { sdhci_prepare_data() 790 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) sdhci_prepare_data() 793 if (host->quirks & 
SDHCI_QUIRK_32BIT_DMA_SIZE) sdhci_prepare_data() 803 host->flags &= ~SDHCI_REQ_USE_DMA; sdhci_prepare_data() 814 if (host->flags & SDHCI_REQ_USE_DMA) { sdhci_prepare_data() 819 if (host->flags & SDHCI_USE_ADMA) { sdhci_prepare_data() 825 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) sdhci_prepare_data() 828 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) sdhci_prepare_data() 837 host->flags &= ~SDHCI_REQ_USE_DMA; sdhci_prepare_data() 844 if (host->flags & SDHCI_REQ_USE_DMA) { sdhci_prepare_data() 845 if (host->flags & SDHCI_USE_ADMA) { sdhci_prepare_data() 846 ret = sdhci_adma_table_pre(host, data); sdhci_prepare_data() 853 host->flags &= ~SDHCI_REQ_USE_DMA; sdhci_prepare_data() 855 sdhci_writel(host, host->adma_addr, sdhci_prepare_data() 857 if (host->flags & SDHCI_USE_64_BIT_DMA) sdhci_prepare_data() 858 sdhci_writel(host, sdhci_prepare_data() 859 (u64)host->adma_addr >> 32, sdhci_prepare_data() 865 sg_cnt = sdhci_pre_dma_transfer(host, data); sdhci_prepare_data() 872 host->flags &= ~SDHCI_REQ_USE_DMA; sdhci_prepare_data() 875 sdhci_writel(host, sg_dma_address(data->sg), sdhci_prepare_data() 886 if (host->version >= SDHCI_SPEC_200) { sdhci_prepare_data() 887 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); sdhci_prepare_data() 889 if ((host->flags & SDHCI_REQ_USE_DMA) && sdhci_prepare_data() 890 (host->flags & SDHCI_USE_ADMA)) { sdhci_prepare_data() 891 if (host->flags & SDHCI_USE_64_BIT_DMA) sdhci_prepare_data() 898 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); sdhci_prepare_data() 901 if (!(host->flags & SDHCI_REQ_USE_DMA)) { sdhci_prepare_data() 905 if (host->data->flags & MMC_DATA_READ) sdhci_prepare_data() 909 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); sdhci_prepare_data() 910 host->blocks = data->blocks; sdhci_prepare_data() 913 sdhci_set_transfer_irqs(host); sdhci_prepare_data() 916 sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, sdhci_prepare_data() 918 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); sdhci_prepare_data() 921 static void sdhci_set_transfer_mode(struct sdhci_host *host, sdhci_set_transfer_mode() argument 928 if (host->quirks2 & sdhci_set_transfer_mode() 930 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE); sdhci_set_transfer_mode() 933 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE); sdhci_set_transfer_mode() 934 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 | sdhci_set_transfer_mode() 940 WARN_ON(!host->data); sdhci_set_transfer_mode() 942 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE)) sdhci_set_transfer_mode() 951 if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) && sdhci_set_transfer_mode() 954 else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) { sdhci_set_transfer_mode() 956 sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2); sdhci_set_transfer_mode() 962 if (host->flags & SDHCI_REQ_USE_DMA) sdhci_set_transfer_mode() 965 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); sdhci_set_transfer_mode() 968 static void sdhci_finish_data(struct sdhci_host *host) sdhci_finish_data() argument 972 BUG_ON(!host->data); sdhci_finish_data() 974 data = host->data; sdhci_finish_data() 975 host->data = NULL; sdhci_finish_data() 977 if (host->flags & SDHCI_REQ_USE_DMA) { sdhci_finish_data() 978 if (host->flags & SDHCI_USE_ADMA) sdhci_finish_data() 979 sdhci_adma_table_post(host, data); sdhci_finish_data() 982 dma_unmap_sg(mmc_dev(host->mmc), sdhci_finish_data() 1010 !host->mrq->sbc)) { sdhci_finish_data() 1017 sdhci_do_reset(host, SDHCI_RESET_CMD); sdhci_finish_data() 1018 sdhci_do_reset(host, SDHCI_RESET_DATA); 
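The count search in sdhci_calc_timeout() above is easy to lose in the flattened listing. Here is a self-contained sketch of it, reconstructed from the visible (1 << 13) seed: the controller times out after roughly 2^(13 + count) timeout-clock cycles, and the driver picks the smallest sufficient count. The cap at 0xE and the main() harness are assumptions for illustration; timeout_clk is in kHz and the target in microseconds, as in the driver.

/* Sketch of the TIMEOUT_CONTROL count search in sdhci_calc_timeout(). */
#include <stdio.h>

static unsigned char calc_timeout_count(unsigned int timeout_clk_khz,
					unsigned int target_timeout_us)
{
	unsigned char count = 0;
	/* timeout for count == 0, in microseconds */
	unsigned int current_timeout = (1 << 13) * 1000 / timeout_clk_khz;

	while (current_timeout < target_timeout_us) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}
	return count > 0xE ? 0xE : count;	/* assume 0xE is the largest usable value */
}

int main(void)
{
	/* e.g. a 50 MHz timeout clock (50000 kHz) and a 100 ms target */
	printf("count=%u\n", (unsigned)calc_timeout_count(50000, 100000));
	return 0;
}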
sdhci_finish_data() 1021 sdhci_send_command(host, data->stop); sdhci_finish_data() 1023 tasklet_schedule(&host->finish_tasklet); sdhci_finish_data() 1026 void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) sdhci_send_command() argument 1032 WARN_ON(host->cmd); sdhci_send_command() 1043 if (host->mrq->data && (cmd == host->mrq->data->stop)) sdhci_send_command() 1046 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { sdhci_send_command() 1049 "inhibit bit(s).\n", mmc_hostname(host->mmc)); sdhci_send_command() 1050 sdhci_dumpregs(host); sdhci_send_command() 1052 tasklet_schedule(&host->finish_tasklet); sdhci_send_command() 1064 mod_timer(&host->timer, timeout); sdhci_send_command() 1066 host->cmd = cmd; sdhci_send_command() 1067 host->busy_handle = 0; sdhci_send_command() 1069 sdhci_prepare_data(host, cmd); sdhci_send_command() 1071 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); sdhci_send_command() 1073 sdhci_set_transfer_mode(host, cmd); sdhci_send_command() 1077 mmc_hostname(host->mmc)); sdhci_send_command() 1079 tasklet_schedule(&host->finish_tasklet); sdhci_send_command() 1102 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); sdhci_send_command() 1106 static void sdhci_finish_command(struct sdhci_host *host) sdhci_finish_command() argument 1110 BUG_ON(host->cmd == NULL); sdhci_finish_command() 1112 if (host->cmd->flags & MMC_RSP_PRESENT) { sdhci_finish_command() 1113 if (host->cmd->flags & MMC_RSP_136) { sdhci_finish_command() 1116 host->cmd->resp[i] = sdhci_readl(host, sdhci_finish_command() 1119 host->cmd->resp[i] |= sdhci_finish_command() 1120 sdhci_readb(host, sdhci_finish_command() 1124 host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE); sdhci_finish_command() 1128 host->cmd->error = 0; sdhci_finish_command() 1131 if (host->cmd == host->mrq->sbc) { sdhci_finish_command() 1132 host->cmd = NULL; sdhci_finish_command() 1133 sdhci_send_command(host, host->mrq->cmd); sdhci_finish_command() 1137 if (host->data && host->data_early) sdhci_finish_command() 1138 sdhci_finish_data(host); sdhci_finish_command() 1140 if (!host->cmd->data) sdhci_finish_command() 1141 tasklet_schedule(&host->finish_tasklet); sdhci_finish_command() 1143 host->cmd = NULL; sdhci_finish_command() 1147 static u16 sdhci_get_preset_value(struct sdhci_host *host) sdhci_get_preset_value() argument 1151 switch (host->timing) { sdhci_get_preset_value() 1153 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); sdhci_get_preset_value() 1156 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25); sdhci_get_preset_value() 1159 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50); sdhci_get_preset_value() 1163 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); sdhci_get_preset_value() 1167 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); sdhci_get_preset_value() 1170 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400); sdhci_get_preset_value() 1174 mmc_hostname(host->mmc)); sdhci_get_preset_value() 1175 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); sdhci_get_preset_value() 1181 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) sdhci_set_clock() argument 1188 host->mmc->actual_clock = 0; sdhci_set_clock() 1190 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); sdhci_set_clock() 1195 if (host->version >= SDHCI_SPEC_300) { sdhci_set_clock() 1196 if (host->preset_enabled) { sdhci_set_clock() 1199 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); sdhci_set_clock() 1200 pre_val = sdhci_get_preset_value(host); sdhci_set_clock() 1203 if (host->clk_mul && sdhci_set_clock() 1207 
clk_mul = host->clk_mul; sdhci_set_clock() 1218 if (host->clk_mul) { sdhci_set_clock() 1220 if ((host->max_clk * host->clk_mul / div) sdhci_set_clock() 1230 clk_mul = host->clk_mul; sdhci_set_clock() 1234 if (host->max_clk <= clock) sdhci_set_clock() 1239 if ((host->max_clk / div) <= clock) sdhci_set_clock() 1249 if ((host->max_clk / div) <= clock) sdhci_set_clock() 1258 host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div; sdhci_set_clock() 1263 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); sdhci_set_clock() 1267 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) sdhci_set_clock() 1271 "stabilised.\n", mmc_hostname(host->mmc)); sdhci_set_clock() 1272 sdhci_dumpregs(host); sdhci_set_clock() 1280 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); sdhci_set_clock() 1284 static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, sdhci_set_power() argument 1287 struct mmc_host *mmc = host->mmc; sdhci_set_power() 1291 spin_unlock_irq(&host->lock); sdhci_set_power() 1293 spin_lock_irq(&host->lock); sdhci_set_power() 1296 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); sdhci_set_power() 1298 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); sdhci_set_power() 1321 if (host->pwr == pwr) sdhci_set_power() 1324 host->pwr = pwr; sdhci_set_power() 1327 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); sdhci_set_power() 1328 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) sdhci_set_power() 1329 sdhci_runtime_pm_bus_off(host); sdhci_set_power() 1336 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) sdhci_set_power() 1337 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); sdhci_set_power() 1344 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) sdhci_set_power() 1345 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); sdhci_set_power() 1349 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); sdhci_set_power() 1351 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) sdhci_set_power() 1352 sdhci_runtime_pm_bus_on(host); sdhci_set_power() 1358 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) sdhci_set_power() 1371 struct sdhci_host *host; sdhci_request() local 1376 host = mmc_priv(mmc); sdhci_request() 1378 sdhci_runtime_pm_get(host); sdhci_request() 1381 present = sdhci_do_get_cd(host); sdhci_request() 1383 spin_lock_irqsave(&host->lock, flags); sdhci_request() 1385 WARN_ON(host->mrq != NULL); sdhci_request() 1388 sdhci_activate_led(host); sdhci_request() 1395 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { sdhci_request() 1402 host->mrq = mrq; sdhci_request() 1404 if (!present || host->flags & SDHCI_DEVICE_DEAD) { sdhci_request() 1405 host->mrq->cmd->error = -ENOMEDIUM; sdhci_request() 1406 tasklet_schedule(&host->finish_tasklet); sdhci_request() 1410 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); sdhci_request() 1416 if ((host->flags & SDHCI_NEEDS_RETUNING) && sdhci_request() 1426 /* Here we need to set the host->mrq to NULL, sdhci_request() 1430 host->mrq = NULL; sdhci_request() 1432 spin_unlock_irqrestore(&host->lock, flags); sdhci_request() 1434 spin_lock_irqsave(&host->lock, flags); sdhci_request() 1437 host->mrq = mrq; sdhci_request() 1441 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) sdhci_request() 1442 sdhci_send_command(host, mrq->sbc); sdhci_request() 1444 sdhci_send_command(host, mrq->cmd); sdhci_request() 1448 spin_unlock_irqrestore(&host->lock, flags); sdhci_request() 1451 void sdhci_set_bus_width(struct sdhci_host *host, int width) sdhci_set_bus_width() argument 1455 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); sdhci_set_bus_width() 1458 if 
(host->version >= SDHCI_SPEC_300) sdhci_set_bus_width() 1461 if (host->version >= SDHCI_SPEC_300) sdhci_set_bus_width() 1468 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); sdhci_set_bus_width() 1472 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) sdhci_set_uhs_signaling() argument 1476 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); sdhci_set_uhs_signaling() 1477 /* Select Bus Speed Mode for host */ sdhci_set_uhs_signaling() 1493 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); sdhci_set_uhs_signaling() 1497 static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) sdhci_do_set_ios() argument 1501 struct mmc_host *mmc = host->mmc; sdhci_do_set_ios() 1503 spin_lock_irqsave(&host->lock, flags); sdhci_do_set_ios() 1505 if (host->flags & SDHCI_DEVICE_DEAD) { sdhci_do_set_ios() 1506 spin_unlock_irqrestore(&host->lock, flags); sdhci_do_set_ios() 1518 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); sdhci_do_set_ios() 1519 sdhci_reinit(host); sdhci_do_set_ios() 1522 if (host->version >= SDHCI_SPEC_300 && sdhci_do_set_ios() 1524 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) sdhci_do_set_ios() 1525 sdhci_enable_preset_value(host, false); sdhci_do_set_ios() 1527 if (!ios->clock || ios->clock != host->clock) { sdhci_do_set_ios() 1528 host->ops->set_clock(host, ios->clock); sdhci_do_set_ios() 1529 host->clock = ios->clock; sdhci_do_set_ios() 1531 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && sdhci_do_set_ios() 1532 host->clock) { sdhci_do_set_ios() 1533 host->timeout_clk = host->mmc->actual_clock ? sdhci_do_set_ios() 1534 host->mmc->actual_clock / 1000 : sdhci_do_set_ios() 1535 host->clock / 1000; sdhci_do_set_ios() 1536 host->mmc->max_busy_timeout = sdhci_do_set_ios() 1537 host->ops->get_max_timeout_count ? 
sdhci_do_set_ios() 1538 host->ops->get_max_timeout_count(host) : sdhci_do_set_ios() 1540 host->mmc->max_busy_timeout /= host->timeout_clk; sdhci_do_set_ios() 1544 sdhci_set_power(host, ios->power_mode, ios->vdd); sdhci_do_set_ios() 1546 if (host->ops->platform_send_init_74_clocks) sdhci_do_set_ios() 1547 host->ops->platform_send_init_74_clocks(host, ios->power_mode); sdhci_do_set_ios() 1549 host->ops->set_bus_width(host, ios->bus_width); sdhci_do_set_ios() 1551 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); sdhci_do_set_ios() 1555 && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) sdhci_do_set_ios() 1560 if (host->version >= SDHCI_SPEC_300) { sdhci_do_set_ios() 1573 if (!host->preset_enabled) { sdhci_do_set_ios() 1574 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); sdhci_do_set_ios() 1579 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); sdhci_do_set_ios() 1586 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); sdhci_do_set_ios() 1596 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); sdhci_do_set_ios() 1598 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); sdhci_do_set_ios() 1600 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); sdhci_do_set_ios() 1603 host->ops->set_clock(host, host->clock); sdhci_do_set_ios() 1607 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); sdhci_do_set_ios() 1609 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); sdhci_do_set_ios() 1611 host->ops->set_uhs_signaling(host, ios->timing); sdhci_do_set_ios() 1612 host->timing = ios->timing; sdhci_do_set_ios() 1614 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && sdhci_do_set_ios() 1623 sdhci_enable_preset_value(host, true); sdhci_do_set_ios() 1624 preset = sdhci_get_preset_value(host); sdhci_do_set_ios() 1630 host->ops->set_clock(host, host->clock); sdhci_do_set_ios() 1632 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); sdhci_do_set_ios() 1639 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) sdhci_do_set_ios() 1640 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); sdhci_do_set_ios() 1643 spin_unlock_irqrestore(&host->lock, flags); sdhci_do_set_ios() 1648 struct sdhci_host *host = mmc_priv(mmc); sdhci_set_ios() local 1650 sdhci_runtime_pm_get(host); sdhci_set_ios() 1651 sdhci_do_set_ios(host, ios); sdhci_set_ios() 1652 sdhci_runtime_pm_put(host); sdhci_set_ios() 1655 static int sdhci_do_get_cd(struct sdhci_host *host) sdhci_do_get_cd() argument 1657 int gpio_cd = mmc_gpio_get_cd(host->mmc); sdhci_do_get_cd() 1659 if (host->flags & SDHCI_DEVICE_DEAD) sdhci_do_get_cd() 1663 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || sdhci_do_get_cd() 1664 (host->mmc->caps & MMC_CAP_NONREMOVABLE)) sdhci_do_get_cd() 1672 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); sdhci_do_get_cd() 1677 struct sdhci_host *host = mmc_priv(mmc); sdhci_get_cd() local 1680 sdhci_runtime_pm_get(host); sdhci_get_cd() 1681 ret = sdhci_do_get_cd(host); sdhci_get_cd() 1682 sdhci_runtime_pm_put(host); sdhci_get_cd() 1686 static int sdhci_check_ro(struct sdhci_host *host) sdhci_check_ro() argument 1691 spin_lock_irqsave(&host->lock, flags); sdhci_check_ro() 1693 if (host->flags & SDHCI_DEVICE_DEAD) sdhci_check_ro() 1695 else if (host->ops->get_ro) sdhci_check_ro() 1696 is_readonly = host->ops->get_ro(host); sdhci_check_ro() 1698 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) sdhci_check_ro() 1701 spin_unlock_irqrestore(&host->lock, flags); sdhci_check_ro() 1704 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ? 
sdhci_check_ro() 1710 static int sdhci_do_get_ro(struct sdhci_host *host) sdhci_do_get_ro() argument 1714 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) sdhci_do_get_ro() 1715 return sdhci_check_ro(host); sdhci_do_get_ro() 1719 if (sdhci_check_ro(host)) { sdhci_do_get_ro() 1730 struct sdhci_host *host = mmc_priv(mmc); sdhci_hw_reset() local 1732 if (host->ops && host->ops->hw_reset) sdhci_hw_reset() 1733 host->ops->hw_reset(host); sdhci_hw_reset() 1738 struct sdhci_host *host = mmc_priv(mmc); sdhci_get_ro() local 1741 sdhci_runtime_pm_get(host); sdhci_get_ro() 1742 ret = sdhci_do_get_ro(host); sdhci_get_ro() 1743 sdhci_runtime_pm_put(host); sdhci_get_ro() 1747 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) sdhci_enable_sdio_irq_nolock() argument 1749 if (!(host->flags & SDHCI_DEVICE_DEAD)) { sdhci_enable_sdio_irq_nolock() 1751 host->ier |= SDHCI_INT_CARD_INT; sdhci_enable_sdio_irq_nolock() 1753 host->ier &= ~SDHCI_INT_CARD_INT; sdhci_enable_sdio_irq_nolock() 1755 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_enable_sdio_irq_nolock() 1756 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); sdhci_enable_sdio_irq_nolock() 1763 struct sdhci_host *host = mmc_priv(mmc); sdhci_enable_sdio_irq() local 1766 sdhci_runtime_pm_get(host); sdhci_enable_sdio_irq() 1768 spin_lock_irqsave(&host->lock, flags); sdhci_enable_sdio_irq() 1770 host->flags |= SDHCI_SDIO_IRQ_ENABLED; sdhci_enable_sdio_irq() 1772 host->flags &= ~SDHCI_SDIO_IRQ_ENABLED; sdhci_enable_sdio_irq() 1774 sdhci_enable_sdio_irq_nolock(host, enable); sdhci_enable_sdio_irq() 1775 spin_unlock_irqrestore(&host->lock, flags); sdhci_enable_sdio_irq() 1777 sdhci_runtime_pm_put(host); sdhci_enable_sdio_irq() 1780 static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, sdhci_do_start_signal_voltage_switch() argument 1783 struct mmc_host *mmc = host->mmc; sdhci_do_start_signal_voltage_switch() 1791 if (host->version < SDHCI_SPEC_300) sdhci_do_start_signal_voltage_switch() 1794 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); sdhci_do_start_signal_voltage_switch() 1800 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); sdhci_do_start_signal_voltage_switch() 1815 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); sdhci_do_start_signal_voltage_switch() 1839 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); sdhci_do_start_signal_voltage_switch() 1842 if (host->ops->voltage_switch) sdhci_do_start_signal_voltage_switch() 1843 host->ops->voltage_switch(host); sdhci_do_start_signal_voltage_switch() 1846 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); sdhci_do_start_signal_voltage_switch() 1874 struct sdhci_host *host = mmc_priv(mmc); sdhci_start_signal_voltage_switch() local 1877 if (host->version < SDHCI_SPEC_300) sdhci_start_signal_voltage_switch() 1879 sdhci_runtime_pm_get(host); sdhci_start_signal_voltage_switch() 1880 err = sdhci_do_start_signal_voltage_switch(host, ios); sdhci_start_signal_voltage_switch() 1881 sdhci_runtime_pm_put(host); sdhci_start_signal_voltage_switch() 1887 struct sdhci_host *host = mmc_priv(mmc); sdhci_card_busy() local 1890 sdhci_runtime_pm_get(host); sdhci_card_busy() 1892 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); sdhci_card_busy() 1893 sdhci_runtime_pm_put(host); sdhci_card_busy() 1900 struct sdhci_host *host = mmc_priv(mmc); sdhci_prepare_hs400_tuning() local 1903 spin_lock_irqsave(&host->lock, flags); sdhci_prepare_hs400_tuning() 1904 host->flags |= SDHCI_HS400_TUNING; sdhci_prepare_hs400_tuning() 1905 spin_unlock_irqrestore(&host->lock, flags); 
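A recurring pattern in this file, visible above in sdhci_enable_sdio_irq_nolock() and sdhci_set_card_detection(), is that the desired interrupt mask is cached in host->ier and then mirrored into both SDHCI_INT_ENABLE and SDHCI_SIGNAL_ENABLE so status reporting and signal generation stay consistent. A toy model of that pattern follows; the struct, register fields and bit value are stand-ins, not the real MMIO interface.

/* Cached interrupt-mask pattern, modelled without kernel headers. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INT_CARD_INT	(1u << 8)	/* illustrative bit position */

struct fake_host {
	uint32_t ier;		/* cached copy of the enable mask */
	uint32_t int_enable;	/* models SDHCI_INT_ENABLE */
	uint32_t signal_enable;	/* models SDHCI_SIGNAL_ENABLE */
};

static void enable_sdio_irq_nolock(struct fake_host *host, bool enable)
{
	if (enable)
		host->ier |= INT_CARD_INT;
	else
		host->ier &= ~INT_CARD_INT;

	/* both registers are always rewritten from the cached mask */
	host->int_enable = host->ier;
	host->signal_enable = host->ier;
}

int main(void)
{
	struct fake_host host = { 0 };

	enable_sdio_irq_nolock(&host, true);
	printf("ier=%#x\n", (unsigned)host.ier);
	return 0;
}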
sdhci_prepare_hs400_tuning() 1912 struct sdhci_host *host = mmc_priv(mmc); sdhci_execute_tuning() local 1920 sdhci_runtime_pm_get(host); sdhci_execute_tuning() 1921 spin_lock_irqsave(&host->lock, flags); sdhci_execute_tuning() 1923 hs400_tuning = host->flags & SDHCI_HS400_TUNING; sdhci_execute_tuning() 1924 host->flags &= ~SDHCI_HS400_TUNING; sdhci_execute_tuning() 1926 if (host->tuning_mode == SDHCI_TUNING_MODE_1) sdhci_execute_tuning() 1927 tuning_count = host->tuning_count; sdhci_execute_tuning() 1936 switch (host->timing) { sdhci_execute_tuning() 1955 if (host->flags & SDHCI_SDR50_NEEDS_TUNING || sdhci_execute_tuning() 1956 host->flags & SDHCI_SDR104_NEEDS_TUNING) sdhci_execute_tuning() 1964 if (host->ops->platform_execute_tuning) { sdhci_execute_tuning() 1965 spin_unlock_irqrestore(&host->lock, flags); sdhci_execute_tuning() 1966 err = host->ops->platform_execute_tuning(host, opcode); sdhci_execute_tuning() 1967 sdhci_runtime_pm_put(host); sdhci_execute_tuning() 1971 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); sdhci_execute_tuning() 1973 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND) sdhci_execute_tuning() 1975 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); sdhci_execute_tuning() 1987 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); sdhci_execute_tuning() 1988 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); sdhci_execute_tuning() 2009 host->mrq = &mrq; sdhci_execute_tuning() 2018 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), sdhci_execute_tuning() 2021 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), sdhci_execute_tuning() 2024 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), sdhci_execute_tuning() 2029 * The tuning block is sent by the card to the host controller. sdhci_execute_tuning() 2034 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); sdhci_execute_tuning() 2036 sdhci_send_command(host, &cmd); sdhci_execute_tuning() 2038 host->cmd = NULL; sdhci_execute_tuning() 2039 host->mrq = NULL; sdhci_execute_tuning() 2041 spin_unlock_irqrestore(&host->lock, flags); sdhci_execute_tuning() 2043 wait_event_interruptible_timeout(host->buf_ready_int, sdhci_execute_tuning() 2044 (host->tuning_done == 1), sdhci_execute_tuning() 2046 spin_lock_irqsave(&host->lock, flags); sdhci_execute_tuning() 2048 if (!host->tuning_done) { sdhci_execute_tuning() 2053 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); sdhci_execute_tuning() 2056 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); sdhci_execute_tuning() 2062 host->tuning_done = 0; sdhci_execute_tuning() 2064 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); sdhci_execute_tuning() 2077 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); sdhci_execute_tuning() 2087 host->flags &= ~SDHCI_NEEDS_RETUNING; sdhci_execute_tuning() 2090 host->flags |= SDHCI_USING_RETUNING_TIMER; sdhci_execute_tuning() 2091 mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ); sdhci_execute_tuning() 2095 * In case tuning fails, host controllers which support re-tuning can sdhci_execute_tuning() 2099 * them. 
SDHCI_USING_RETUNING_TIMER means the host is currently using sdhci_execute_tuning() 2102 if (err && (host->flags & SDHCI_USING_RETUNING_TIMER)) sdhci_execute_tuning() 2105 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_execute_tuning() 2106 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); sdhci_execute_tuning() 2108 spin_unlock_irqrestore(&host->lock, flags); sdhci_execute_tuning() 2109 sdhci_runtime_pm_put(host); sdhci_execute_tuning() 2115 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) sdhci_enable_preset_value() argument 2118 if (host->version < SDHCI_SPEC_300) sdhci_enable_preset_value() 2125 if (host->preset_enabled != enable) { sdhci_enable_preset_value() 2126 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); sdhci_enable_preset_value() 2133 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); sdhci_enable_preset_value() 2136 host->flags |= SDHCI_PV_ENABLED; sdhci_enable_preset_value() 2138 host->flags &= ~SDHCI_PV_ENABLED; sdhci_enable_preset_value() 2140 host->preset_enabled = enable; sdhci_enable_preset_value() 2147 struct sdhci_host *host = mmc_priv(mmc); sdhci_post_req() local 2150 if (host->flags & SDHCI_REQ_USE_DMA) { sdhci_post_req() 2153 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, sdhci_post_req() 2160 static int sdhci_pre_dma_transfer(struct sdhci_host *host, sdhci_pre_dma_transfer() argument 2172 sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, sdhci_pre_dma_transfer() 2188 struct sdhci_host *host = mmc_priv(mmc); sdhci_pre_req() local 2192 if (host->flags & SDHCI_REQ_USE_DMA) sdhci_pre_req() 2193 sdhci_pre_dma_transfer(host, mrq->data); sdhci_pre_req() 2198 struct sdhci_host *host = mmc_priv(mmc); sdhci_card_event() local 2203 if (host->ops->card_event) sdhci_card_event() 2204 host->ops->card_event(host); sdhci_card_event() 2206 present = sdhci_do_get_cd(host); sdhci_card_event() 2208 spin_lock_irqsave(&host->lock, flags); sdhci_card_event() 2210 /* Check host->mrq first in case we are runtime suspended */ sdhci_card_event() 2211 if (host->mrq && !present) { sdhci_card_event() 2213 mmc_hostname(host->mmc)); sdhci_card_event() 2215 mmc_hostname(host->mmc)); sdhci_card_event() 2217 sdhci_do_reset(host, SDHCI_RESET_CMD); sdhci_card_event() 2218 sdhci_do_reset(host, SDHCI_RESET_DATA); sdhci_card_event() 2220 host->mrq->cmd->error = -ENOMEDIUM; sdhci_card_event() 2221 tasklet_schedule(&host->finish_tasklet); sdhci_card_event() 2224 spin_unlock_irqrestore(&host->lock, flags); sdhci_card_event() 2251 struct sdhci_host *host; sdhci_tasklet_finish() local 2255 host = (struct sdhci_host*)param; sdhci_tasklet_finish() 2257 spin_lock_irqsave(&host->lock, flags); sdhci_tasklet_finish() 2263 if (!host->mrq) { sdhci_tasklet_finish() 2264 spin_unlock_irqrestore(&host->lock, flags); sdhci_tasklet_finish() 2268 del_timer(&host->timer); sdhci_tasklet_finish() 2270 mrq = host->mrq; sdhci_tasklet_finish() 2276 if (!(host->flags & SDHCI_DEVICE_DEAD) && sdhci_tasklet_finish() 2281 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { sdhci_tasklet_finish() 2284 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) sdhci_tasklet_finish() 2286 host->ops->set_clock(host, host->clock); sdhci_tasklet_finish() 2290 sdhci_do_reset(host, SDHCI_RESET_CMD); sdhci_tasklet_finish() 2291 sdhci_do_reset(host, SDHCI_RESET_DATA); sdhci_tasklet_finish() 2294 host->mrq = NULL; sdhci_tasklet_finish() 2295 host->cmd = NULL; sdhci_tasklet_finish() 2296 host->data = NULL; sdhci_tasklet_finish() 2299 sdhci_deactivate_led(host); sdhci_tasklet_finish() 2303 
spin_unlock_irqrestore(&host->lock, flags); sdhci_tasklet_finish() 2305 mmc_request_done(host->mmc, mrq); sdhci_tasklet_finish() 2306 sdhci_runtime_pm_put(host); sdhci_tasklet_finish() 2311 struct sdhci_host *host; sdhci_timeout_timer() local 2314 host = (struct sdhci_host*)data; sdhci_timeout_timer() 2316 spin_lock_irqsave(&host->lock, flags); sdhci_timeout_timer() 2318 if (host->mrq) { sdhci_timeout_timer() 2320 "interrupt.\n", mmc_hostname(host->mmc)); sdhci_timeout_timer() 2321 sdhci_dumpregs(host); sdhci_timeout_timer() 2323 if (host->data) { sdhci_timeout_timer() 2324 host->data->error = -ETIMEDOUT; sdhci_timeout_timer() 2325 sdhci_finish_data(host); sdhci_timeout_timer() 2327 if (host->cmd) sdhci_timeout_timer() 2328 host->cmd->error = -ETIMEDOUT; sdhci_timeout_timer() 2330 host->mrq->cmd->error = -ETIMEDOUT; sdhci_timeout_timer() 2332 tasklet_schedule(&host->finish_tasklet); sdhci_timeout_timer() 2337 spin_unlock_irqrestore(&host->lock, flags); sdhci_timeout_timer() 2342 struct sdhci_host *host; sdhci_tuning_timer() local 2345 host = (struct sdhci_host *)data; sdhci_tuning_timer() 2347 spin_lock_irqsave(&host->lock, flags); sdhci_tuning_timer() 2349 host->flags |= SDHCI_NEEDS_RETUNING; sdhci_tuning_timer() 2351 spin_unlock_irqrestore(&host->lock, flags); sdhci_tuning_timer() 2360 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) sdhci_cmd_irq() argument 2364 if (!host->cmd) { sdhci_cmd_irq() 2367 mmc_hostname(host->mmc), (unsigned)intmask); sdhci_cmd_irq() 2368 sdhci_dumpregs(host); sdhci_cmd_irq() 2373 host->cmd->error = -ETIMEDOUT; sdhci_cmd_irq() 2376 host->cmd->error = -EILSEQ; sdhci_cmd_irq() 2378 if (host->cmd->error) { sdhci_cmd_irq() 2379 tasklet_schedule(&host->finish_tasklet); sdhci_cmd_irq() 2384 * The host can send and interrupt when the busy state has sdhci_cmd_irq() 2394 if (host->cmd->flags & MMC_RSP_BUSY) { sdhci_cmd_irq() 2395 if (host->cmd->data) sdhci_cmd_irq() 2398 else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) sdhci_cmd_irq() 2399 && !host->busy_handle) { sdhci_cmd_irq() 2401 host->busy_handle = 1; sdhci_cmd_irq() 2407 } else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) && sdhci_cmd_irq() 2408 host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) { sdhci_cmd_irq() 2413 sdhci_finish_command(host); sdhci_cmd_irq() 2417 static void sdhci_adma_show_error(struct sdhci_host *host) sdhci_adma_show_error() argument 2419 const char *name = mmc_hostname(host->mmc); sdhci_adma_show_error() 2420 void *desc = host->adma_table; sdhci_adma_show_error() 2422 sdhci_dumpregs(host); sdhci_adma_show_error() 2427 if (host->flags & SDHCI_USE_64_BIT_DMA) sdhci_adma_show_error() 2439 desc += host->desc_sz; sdhci_adma_show_error() 2446 static void sdhci_adma_show_error(struct sdhci_host *host) { } sdhci_adma_show_error() argument 2449 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) sdhci_data_irq() argument 2456 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); sdhci_data_irq() 2459 host->tuning_done = 1; sdhci_data_irq() 2460 wake_up(&host->buf_ready_int); sdhci_data_irq() 2465 if (!host->data) { sdhci_data_irq() 2471 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) { sdhci_data_irq() 2473 host->cmd->error = -ETIMEDOUT; sdhci_data_irq() 2474 tasklet_schedule(&host->finish_tasklet); sdhci_data_irq() 2483 if (host->busy_handle) sdhci_data_irq() 2484 sdhci_finish_command(host); sdhci_data_irq() 2486 host->busy_handle = 1; sdhci_data_irq() 2493 mmc_hostname(host->mmc), (unsigned)intmask); sdhci_data_irq() 2494 
sdhci_dumpregs(host); sdhci_data_irq() 2500 host->data->error = -ETIMEDOUT; sdhci_data_irq() 2502 host->data->error = -EILSEQ; sdhci_data_irq() 2504 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) sdhci_data_irq() 2506 host->data->error = -EILSEQ; sdhci_data_irq() 2508 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc)); sdhci_data_irq() 2509 sdhci_adma_show_error(host); sdhci_data_irq() 2510 host->data->error = -EIO; sdhci_data_irq() 2511 if (host->ops->adma_workaround) sdhci_data_irq() 2512 host->ops->adma_workaround(host, intmask); sdhci_data_irq() 2515 if (host->data->error) sdhci_data_irq() 2516 sdhci_finish_data(host); sdhci_data_irq() 2519 sdhci_transfer_pio(host); sdhci_data_irq() 2526 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) sdhci_data_irq() 2532 dmastart = sg_dma_address(host->data->sg); sdhci_data_irq() 2533 dmanow = dmastart + host->data->bytes_xfered; sdhci_data_irq() 2540 host->data->bytes_xfered = dmanow - dmastart; sdhci_data_irq() 2543 mmc_hostname(host->mmc), dmastart, sdhci_data_irq() 2544 host->data->bytes_xfered, dmanow); sdhci_data_irq() 2545 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); sdhci_data_irq() 2549 if (host->cmd) { sdhci_data_irq() 2555 host->data_early = 1; sdhci_data_irq() 2557 sdhci_finish_data(host); sdhci_data_irq() 2566 struct sdhci_host *host = dev_id; sdhci_irq() local 2570 spin_lock(&host->lock); sdhci_irq() 2572 if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) { sdhci_irq() 2573 spin_unlock(&host->lock); sdhci_irq() 2577 intmask = sdhci_readl(host, SDHCI_INT_STATUS); sdhci_irq() 2587 sdhci_writel(host, mask, SDHCI_INT_STATUS); sdhci_irq() 2590 mmc_hostname(host->mmc), intmask); sdhci_irq() 2593 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & sdhci_irq() 2607 host->ier &= ~(SDHCI_INT_CARD_INSERT | sdhci_irq() 2609 host->ier |= present ? 
SDHCI_INT_CARD_REMOVE : sdhci_irq() 2611 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_irq() 2612 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); sdhci_irq() 2614 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | sdhci_irq() 2617 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | sdhci_irq() 2623 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, sdhci_irq() 2627 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); sdhci_irq() 2631 mmc_hostname(host->mmc)); sdhci_irq() 2634 sdhci_enable_sdio_irq_nolock(host, false); sdhci_irq() 2635 host->thread_isr |= SDHCI_INT_CARD_INT; sdhci_irq() 2646 sdhci_writel(host, intmask, SDHCI_INT_STATUS); sdhci_irq() 2652 intmask = sdhci_readl(host, SDHCI_INT_STATUS); sdhci_irq() 2655 spin_unlock(&host->lock); sdhci_irq() 2659 mmc_hostname(host->mmc), unexpected); sdhci_irq() 2660 sdhci_dumpregs(host); sdhci_irq() 2668 struct sdhci_host *host = dev_id; sdhci_thread_irq() local 2672 spin_lock_irqsave(&host->lock, flags); sdhci_thread_irq() 2673 isr = host->thread_isr; sdhci_thread_irq() 2674 host->thread_isr = 0; sdhci_thread_irq() 2675 spin_unlock_irqrestore(&host->lock, flags); sdhci_thread_irq() 2678 sdhci_card_event(host->mmc); sdhci_thread_irq() 2679 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); sdhci_thread_irq() 2683 sdio_run_irqs(host->mmc); sdhci_thread_irq() 2685 spin_lock_irqsave(&host->lock, flags); sdhci_thread_irq() 2686 if (host->flags & SDHCI_SDIO_IRQ_ENABLED) sdhci_thread_irq() 2687 sdhci_enable_sdio_irq_nolock(host, true); sdhci_thread_irq() 2688 spin_unlock_irqrestore(&host->lock, flags); sdhci_thread_irq() 2701 void sdhci_enable_irq_wakeups(struct sdhci_host *host) sdhci_enable_irq_wakeups() argument 2707 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); sdhci_enable_irq_wakeups() 2710 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) sdhci_enable_irq_wakeups() 2712 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); sdhci_enable_irq_wakeups() 2716 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) sdhci_disable_irq_wakeups() argument 2722 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); sdhci_disable_irq_wakeups() 2724 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); sdhci_disable_irq_wakeups() 2727 int sdhci_suspend_host(struct sdhci_host *host) sdhci_suspend_host() argument 2729 sdhci_disable_card_detection(host); sdhci_suspend_host() 2732 if (host->flags & SDHCI_USING_RETUNING_TIMER) { sdhci_suspend_host() 2733 del_timer_sync(&host->tuning_timer); sdhci_suspend_host() 2734 host->flags &= ~SDHCI_NEEDS_RETUNING; sdhci_suspend_host() 2737 if (!device_may_wakeup(mmc_dev(host->mmc))) { sdhci_suspend_host() 2738 host->ier = 0; sdhci_suspend_host() 2739 sdhci_writel(host, 0, SDHCI_INT_ENABLE); sdhci_suspend_host() 2740 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); sdhci_suspend_host() 2741 free_irq(host->irq, host); sdhci_suspend_host() 2743 sdhci_enable_irq_wakeups(host); sdhci_suspend_host() 2744 enable_irq_wake(host->irq); sdhci_suspend_host() 2751 int sdhci_resume_host(struct sdhci_host *host) sdhci_resume_host() argument 2755 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { sdhci_resume_host() 2756 if (host->ops->enable_dma) sdhci_resume_host() 2757 host->ops->enable_dma(host); sdhci_resume_host() 2760 if (!device_may_wakeup(mmc_dev(host->mmc))) { sdhci_resume_host() 2761 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_resume_host() 2763 mmc_hostname(host->mmc), host); sdhci_resume_host() 2767 sdhci_disable_irq_wakeups(host); sdhci_resume_host() 2768 disable_irq_wake(host->irq); 
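The card-detect handling in sdhci_irq() above re-arms only the interrupt for the opposite of the currently reported card state, so a bouncing detect line cannot generate a storm of insert/remove interrupts. A small sketch of that flip, with a stand-in host type and illustrative bit values:

/* Card-detect re-arm pattern from sdhci_irq(), modelled standalone. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INT_CARD_INSERT	(1u << 6)	/* illustrative bit positions */
#define INT_CARD_REMOVE	(1u << 7)

struct fake_host {
	uint32_t ier;	/* cached interrupt-enable mask */
};

/* leave only the transition we have not yet seen enabled */
static void rearm_card_detect(struct fake_host *host, bool card_present)
{
	host->ier &= ~(INT_CARD_INSERT | INT_CARD_REMOVE);
	host->ier |= card_present ? INT_CARD_REMOVE : INT_CARD_INSERT;
	/* the driver then writes host->ier to INT_ENABLE and SIGNAL_ENABLE */
}

int main(void)
{
	struct fake_host host = { 0 };

	rearm_card_detect(&host, true);	/* card inserted: wait for removal only */
	printf("ier=%#x\n", (unsigned)host.ier);
	return 0;
}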
sdhci_resume_host() 2771 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && sdhci_resume_host() 2772 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { sdhci_resume_host() 2773 /* Card keeps power but host controller does not */ sdhci_resume_host() 2774 sdhci_init(host, 0); sdhci_resume_host() 2775 host->pwr = 0; sdhci_resume_host() 2776 host->clock = 0; sdhci_resume_host() 2777 sdhci_do_set_ios(host, &host->mmc->ios); sdhci_resume_host() 2779 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); sdhci_resume_host() 2783 sdhci_enable_card_detection(host); sdhci_resume_host() 2786 if (host->flags & SDHCI_USING_RETUNING_TIMER) sdhci_resume_host() 2787 host->flags |= SDHCI_NEEDS_RETUNING; sdhci_resume_host() 2794 static int sdhci_runtime_pm_get(struct sdhci_host *host) sdhci_runtime_pm_get() argument 2796 return pm_runtime_get_sync(host->mmc->parent); sdhci_runtime_pm_get() 2799 static int sdhci_runtime_pm_put(struct sdhci_host *host) sdhci_runtime_pm_put() argument 2801 pm_runtime_mark_last_busy(host->mmc->parent); sdhci_runtime_pm_put() 2802 return pm_runtime_put_autosuspend(host->mmc->parent); sdhci_runtime_pm_put() 2805 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) sdhci_runtime_pm_bus_on() argument 2807 if (host->bus_on) sdhci_runtime_pm_bus_on() 2809 host->bus_on = true; sdhci_runtime_pm_bus_on() 2810 pm_runtime_get_noresume(host->mmc->parent); sdhci_runtime_pm_bus_on() 2813 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) sdhci_runtime_pm_bus_off() argument 2815 if (!host->bus_on) sdhci_runtime_pm_bus_off() 2817 host->bus_on = false; sdhci_runtime_pm_bus_off() 2818 pm_runtime_put_noidle(host->mmc->parent); sdhci_runtime_pm_bus_off() 2821 int sdhci_runtime_suspend_host(struct sdhci_host *host) sdhci_runtime_suspend_host() argument 2826 if (host->flags & SDHCI_USING_RETUNING_TIMER) { sdhci_runtime_suspend_host() 2827 del_timer_sync(&host->tuning_timer); sdhci_runtime_suspend_host() 2828 host->flags &= ~SDHCI_NEEDS_RETUNING; sdhci_runtime_suspend_host() 2831 spin_lock_irqsave(&host->lock, flags); sdhci_runtime_suspend_host() 2832 host->ier &= SDHCI_INT_CARD_INT; sdhci_runtime_suspend_host() 2833 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_runtime_suspend_host() 2834 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); sdhci_runtime_suspend_host() 2835 spin_unlock_irqrestore(&host->lock, flags); sdhci_runtime_suspend_host() 2837 synchronize_hardirq(host->irq); sdhci_runtime_suspend_host() 2839 spin_lock_irqsave(&host->lock, flags); sdhci_runtime_suspend_host() 2840 host->runtime_suspended = true; sdhci_runtime_suspend_host() 2841 spin_unlock_irqrestore(&host->lock, flags); sdhci_runtime_suspend_host() 2847 int sdhci_runtime_resume_host(struct sdhci_host *host) sdhci_runtime_resume_host() argument 2850 int host_flags = host->flags; sdhci_runtime_resume_host() 2853 if (host->ops->enable_dma) sdhci_runtime_resume_host() 2854 host->ops->enable_dma(host); sdhci_runtime_resume_host() 2857 sdhci_init(host, 0); sdhci_runtime_resume_host() 2860 host->pwr = 0; sdhci_runtime_resume_host() 2861 host->clock = 0; sdhci_runtime_resume_host() 2862 sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios); sdhci_runtime_resume_host() 2863 sdhci_do_set_ios(host, &host->mmc->ios); sdhci_runtime_resume_host() 2866 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { sdhci_runtime_resume_host() 2867 spin_lock_irqsave(&host->lock, flags); sdhci_runtime_resume_host() 2868 sdhci_enable_preset_value(host, true); sdhci_runtime_resume_host() 2869 
spin_unlock_irqrestore(&host->lock, flags); sdhci_runtime_resume_host() 2873 if (host->flags & SDHCI_USING_RETUNING_TIMER) sdhci_runtime_resume_host() 2874 host->flags |= SDHCI_NEEDS_RETUNING; sdhci_runtime_resume_host() 2876 spin_lock_irqsave(&host->lock, flags); sdhci_runtime_resume_host() 2878 host->runtime_suspended = false; sdhci_runtime_resume_host() 2881 if (host->flags & SDHCI_SDIO_IRQ_ENABLED) sdhci_runtime_resume_host() 2882 sdhci_enable_sdio_irq_nolock(host, true); sdhci_runtime_resume_host() 2885 sdhci_enable_card_detection(host); sdhci_runtime_resume_host() 2887 spin_unlock_irqrestore(&host->lock, flags); sdhci_runtime_resume_host() 2905 struct sdhci_host *host; sdhci_alloc_host() local 2913 host = mmc_priv(mmc); sdhci_alloc_host() 2914 host->mmc = mmc; sdhci_alloc_host() 2916 return host; sdhci_alloc_host() 2921 int sdhci_add_host(struct sdhci_host *host) sdhci_add_host() argument 2930 WARN_ON(host == NULL); sdhci_add_host() 2931 if (host == NULL) sdhci_add_host() 2934 mmc = host->mmc; sdhci_add_host() 2937 host->quirks = debug_quirks; sdhci_add_host() 2939 host->quirks2 = debug_quirks2; sdhci_add_host() 2941 override_timeout_clk = host->timeout_clk; sdhci_add_host() 2943 sdhci_do_reset(host, SDHCI_RESET_ALL); sdhci_add_host() 2945 host->version = sdhci_readw(host, SDHCI_HOST_VERSION); sdhci_add_host() 2946 host->version = (host->version & SDHCI_SPEC_VER_MASK) sdhci_add_host() 2948 if (host->version > SDHCI_SPEC_300) { sdhci_add_host() 2951 host->version); sdhci_add_host() 2954 caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps : sdhci_add_host() 2955 sdhci_readl(host, SDHCI_CAPABILITIES); sdhci_add_host() 2957 if (host->version >= SDHCI_SPEC_300) sdhci_add_host() 2958 caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? 
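sdhci_alloc_host() above relies on the MMC core convention that the driver's private state is allocated together with struct mmc_host and reached through mmc_priv(). A minimal sketch of that pairing, with made-up demo_* names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mmc/host.h>

struct demo_host {
	struct mmc_host *mmc;	/* back-pointer used throughout the driver */
	void __iomem *base;
};

static struct demo_host *demo_alloc(struct device *dev)
{
	struct mmc_host *mmc;
	struct demo_host *host;

	mmc = mmc_alloc_host(sizeof(*host), dev);	/* private area follows mmc_host */
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);	/* points into that private area */
	host->mmc = mmc;
	return host;
}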
sdhci_add_host() 2959 host->caps1 : sdhci_add_host() 2960 sdhci_readl(host, SDHCI_CAPABILITIES_1); sdhci_add_host() 2962 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) sdhci_add_host() 2963 host->flags |= SDHCI_USE_SDMA; sdhci_add_host() 2967 host->flags |= SDHCI_USE_SDMA; sdhci_add_host() 2969 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && sdhci_add_host() 2970 (host->flags & SDHCI_USE_SDMA)) { sdhci_add_host() 2972 host->flags &= ~SDHCI_USE_SDMA; sdhci_add_host() 2975 if ((host->version >= SDHCI_SPEC_200) && sdhci_add_host() 2977 host->flags |= SDHCI_USE_ADMA; sdhci_add_host() 2979 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && sdhci_add_host() 2980 (host->flags & SDHCI_USE_ADMA)) { sdhci_add_host() 2982 host->flags &= ~SDHCI_USE_ADMA; sdhci_add_host() 2992 if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) sdhci_add_host() 2993 host->flags |= SDHCI_USE_64_BIT_DMA; sdhci_add_host() 2995 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { sdhci_add_host() 2996 if (host->ops->enable_dma) { sdhci_add_host() 2997 if (host->ops->enable_dma(host)) { sdhci_add_host() 3000 host->flags &= sdhci_add_host() 3007 if (host->flags & SDHCI_USE_64_BIT_DMA) sdhci_add_host() 3008 host->flags &= ~SDHCI_USE_SDMA; sdhci_add_host() 3010 if (host->flags & SDHCI_USE_ADMA) { sdhci_add_host() 3017 if (host->flags & SDHCI_USE_64_BIT_DMA) { sdhci_add_host() 3018 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * sdhci_add_host() 3020 host->align_buffer_sz = SDHCI_MAX_SEGS * sdhci_add_host() 3022 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ; sdhci_add_host() 3023 host->align_sz = SDHCI_ADMA2_64_ALIGN; sdhci_add_host() 3024 host->align_mask = SDHCI_ADMA2_64_ALIGN - 1; sdhci_add_host() 3026 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * sdhci_add_host() 3028 host->align_buffer_sz = SDHCI_MAX_SEGS * sdhci_add_host() 3030 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; sdhci_add_host() 3031 host->align_sz = SDHCI_ADMA2_32_ALIGN; sdhci_add_host() 3032 host->align_mask = SDHCI_ADMA2_32_ALIGN - 1; sdhci_add_host() 3034 host->adma_table = dma_alloc_coherent(mmc_dev(mmc), sdhci_add_host() 3035 host->adma_table_sz, sdhci_add_host() 3036 &host->adma_addr, sdhci_add_host() 3038 host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); sdhci_add_host() 3039 if (!host->adma_table || !host->align_buffer) { sdhci_add_host() 3040 if (host->adma_table) sdhci_add_host() 3042 host->adma_table_sz, sdhci_add_host() 3043 host->adma_table, sdhci_add_host() 3044 host->adma_addr); sdhci_add_host() 3045 kfree(host->align_buffer); sdhci_add_host() 3048 host->flags &= ~SDHCI_USE_ADMA; sdhci_add_host() 3049 host->adma_table = NULL; sdhci_add_host() 3050 host->align_buffer = NULL; sdhci_add_host() 3051 } else if (host->adma_addr & host->align_mask) { sdhci_add_host() 3054 host->flags &= ~SDHCI_USE_ADMA; sdhci_add_host() 3055 dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, sdhci_add_host() 3056 host->adma_table, host->adma_addr); sdhci_add_host() 3057 kfree(host->align_buffer); sdhci_add_host() 3058 host->adma_table = NULL; sdhci_add_host() 3059 host->align_buffer = NULL; sdhci_add_host() 3068 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { sdhci_add_host() 3069 host->dma_mask = DMA_BIT_MASK(64); sdhci_add_host() 3070 mmc_dev(mmc)->dma_mask = &host->dma_mask; sdhci_add_host() 3073 if (host->version >= SDHCI_SPEC_300) sdhci_add_host() 3074 host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK) sdhci_add_host() 3077 host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK) sdhci_add_host() 3080 host->max_clk *= 1000000; sdhci_add_host() 3081 
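The adma_table_sz arithmetic above reserves two descriptors per segment (one for the data, one for a possible alignment fix-up) plus a terminating descriptor. The worked example below just evaluates that formula; the constants (128 maximum segments, 8-byte 32-bit descriptors, 12-byte 64-bit descriptors, 4-byte 32-bit alignment) are what this kernel generation appears to use and should be read as assumptions.

#include <stdio.h>

int main(void)
{
	unsigned int max_segs = 128;		/* assumed SDHCI_MAX_SEGS */
	unsigned int desc32 = 8, desc64 = 12;	/* assumed descriptor sizes */

	/* (segments * 2 + 1) descriptors: data + align fix-up per segment,
	 * plus one terminator */
	printf("32-bit ADMA table: %u bytes\n", (max_segs * 2 + 1) * desc32);
	printf("64-bit ADMA table: %u bytes\n", (max_segs * 2 + 1) * desc64);
	printf("align buffer (32-bit, 4-byte align): %u bytes\n", max_segs * 4);
	return 0;
}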
if (host->max_clk == 0 || host->quirks & sdhci_add_host() 3083 if (!host->ops->get_max_clock) { sdhci_add_host() 3088 host->max_clk = host->ops->get_max_clock(host); sdhci_add_host() 3095 host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >> sdhci_add_host() 3104 if (host->clk_mul) sdhci_add_host() 3105 host->clk_mul += 1; sdhci_add_host() 3108 * Set host parameters. sdhci_add_host() 3111 mmc->f_max = host->max_clk; sdhci_add_host() 3112 if (host->ops->get_min_clock) sdhci_add_host() 3113 mmc->f_min = host->ops->get_min_clock(host); sdhci_add_host() 3114 else if (host->version >= SDHCI_SPEC_300) { sdhci_add_host() 3115 if (host->clk_mul) { sdhci_add_host() 3116 mmc->f_min = (host->max_clk * host->clk_mul) / 1024; sdhci_add_host() 3117 mmc->f_max = host->max_clk * host->clk_mul; sdhci_add_host() 3119 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; sdhci_add_host() 3121 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; sdhci_add_host() 3123 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { sdhci_add_host() 3124 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> sdhci_add_host() 3126 if (host->timeout_clk == 0) { sdhci_add_host() 3127 if (host->ops->get_timeout_clock) { sdhci_add_host() 3128 host->timeout_clk = sdhci_add_host() 3129 host->ops->get_timeout_clock(host); sdhci_add_host() 3138 host->timeout_clk *= 1000; sdhci_add_host() 3141 host->timeout_clk = override_timeout_clk; sdhci_add_host() 3143 mmc->max_busy_timeout = host->ops->get_max_timeout_count ? sdhci_add_host() 3144 host->ops->get_max_timeout_count(host) : 1 << 27; sdhci_add_host() 3145 mmc->max_busy_timeout /= host->timeout_clk; sdhci_add_host() 3151 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) sdhci_add_host() 3152 host->flags |= SDHCI_AUTO_CMD12; sdhci_add_host() 3155 if ((host->version >= SDHCI_SPEC_300) && sdhci_add_host() 3156 ((host->flags & SDHCI_USE_ADMA) || sdhci_add_host() 3157 !(host->flags & SDHCI_USE_SDMA)) && sdhci_add_host() 3158 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { sdhci_add_host() 3159 host->flags |= SDHCI_AUTO_CMD23; sdhci_add_host() 3172 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) sdhci_add_host() 3175 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) sdhci_add_host() 3181 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && sdhci_add_host() 3204 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) sdhci_add_host() 3219 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) sdhci_add_host() 3224 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 && sdhci_add_host() 3235 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50)) sdhci_add_host() 3238 /* Does the host need tuning for SDR50? */ sdhci_add_host() 3240 host->flags |= SDHCI_SDR50_NEEDS_TUNING; sdhci_add_host() 3242 /* Does the host need tuning for SDR104 / HS200? */ sdhci_add_host() 3244 host->flags |= SDHCI_SDR104_NEEDS_TUNING; sdhci_add_host() 3246 /* Driver Type(s) (A, C, D) supported by the host */ sdhci_add_host() 3255 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >> sdhci_add_host() 3262 if (host->tuning_count) sdhci_add_host() 3263 host->tuning_count = 1 << (host->tuning_count - 1); sdhci_add_host() 3266 host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >> sdhci_add_host() 3278 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); sdhci_add_host() 3320 /* If OCR set by host, use it instead. 
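The f_min/f_max block above encodes the SDHCI divider limits: a v3.00 host can divide the base clock by at most 2046, a v2.00 host by at most 256, and a host with a programmable clock (clk_mul) steps the multiplied clock down in 1024 steps. A small worked example of the three cases, with a made-up 200 MHz base clock:

#include <stdio.h>

int main(void)
{
	unsigned long max_clk = 200000000;	/* 200 MHz, made up */
	unsigned long clk_mul = 4;		/* pretend programmable clock */

	printf("v2 host:  f_min=%lu f_max=%lu\n", max_clk / 256, max_clk);
	printf("v3 host:  f_min=%lu f_max=%lu\n", max_clk / 2046, max_clk);
	printf("clk_mul:  f_min=%lu f_max=%lu\n",
	       max_clk * clk_mul / 1024, max_clk * clk_mul);
	return 0;
}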
*/ sdhci_add_host() 3321 if (host->ocr_mask) sdhci_add_host() 3322 ocr_avail = host->ocr_mask; sdhci_add_host() 3330 if (host->ocr_avail_sdio) sdhci_add_host() 3331 mmc->ocr_avail_sdio &= host->ocr_avail_sdio; sdhci_add_host() 3333 if (host->ocr_avail_sd) sdhci_add_host() 3334 mmc->ocr_avail_sd &= host->ocr_avail_sd; sdhci_add_host() 3338 if (host->ocr_avail_mmc) sdhci_add_host() 3339 mmc->ocr_avail_mmc &= host->ocr_avail_mmc; sdhci_add_host() 3347 spin_lock_init(&host->lock); sdhci_add_host() 3353 if (host->flags & SDHCI_USE_ADMA) sdhci_add_host() 3355 else if (host->flags & SDHCI_USE_SDMA) sdhci_add_host() 3372 if (host->flags & SDHCI_USE_ADMA) { sdhci_add_host() 3373 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) sdhci_add_host() 3385 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { sdhci_add_host() 3402 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; sdhci_add_host() 3407 tasklet_init(&host->finish_tasklet, sdhci_add_host() 3408 sdhci_tasklet_finish, (unsigned long)host); sdhci_add_host() 3410 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host); sdhci_add_host() 3412 init_waitqueue_head(&host->buf_ready_int); sdhci_add_host() 3414 if (host->version >= SDHCI_SPEC_300) { sdhci_add_host() 3416 init_timer(&host->tuning_timer); sdhci_add_host() 3417 host->tuning_timer.data = (unsigned long)host; sdhci_add_host() 3418 host->tuning_timer.function = sdhci_tuning_timer; sdhci_add_host() 3421 sdhci_init(host, 0); sdhci_add_host() 3423 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, sdhci_add_host() 3424 IRQF_SHARED, mmc_hostname(mmc), host); sdhci_add_host() 3427 mmc_hostname(mmc), host->irq, ret); sdhci_add_host() 3432 sdhci_dumpregs(host); sdhci_add_host() 3436 snprintf(host->led_name, sizeof(host->led_name), sdhci_add_host() 3438 host->led.name = host->led_name; sdhci_add_host() 3439 host->led.brightness = LED_OFF; sdhci_add_host() 3440 host->led.default_trigger = mmc_hostname(mmc); sdhci_add_host() 3441 host->led.brightness_set = sdhci_led_control; sdhci_add_host() 3443 ret = led_classdev_register(mmc_dev(mmc), &host->led); sdhci_add_host() 3456 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), sdhci_add_host() 3457 (host->flags & SDHCI_USE_ADMA) ? sdhci_add_host() 3458 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : sdhci_add_host() 3459 (host->flags & SDHCI_USE_SDMA) ? 
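tasklet_init() and setup_timer() above wire up the two deferred paths every request needs: a softirq-context tasklet that completes the finished request, and a timer that fires if the hardware never answers. A minimal sketch of that pairing, using the pre-4.15 unsigned-long callback convention this tree uses; all demo_* names are illustrative.

#include <linux/interrupt.h>
#include <linux/timer.h>

struct demo_host {
	struct tasklet_struct finish_tasklet;
	struct timer_list timer;
};

static void demo_finish_tasklet(unsigned long param)
{
	struct demo_host *host = (struct demo_host *)param;

	/* complete the mmc_request outside the hard interrupt handler */
	(void)host;
}

static void demo_timeout_timer(unsigned long param)
{
	struct demo_host *host = (struct demo_host *)param;

	/* the request never completed: report the error and clean up */
	(void)host;
}

static void demo_init_deferred(struct demo_host *host)
{
	tasklet_init(&host->finish_tasklet, demo_finish_tasklet,
		     (unsigned long)host);
	setup_timer(&host->timer, demo_timeout_timer, (unsigned long)host);
}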
"DMA" : "PIO"); sdhci_add_host() 3461 sdhci_enable_card_detection(host); sdhci_add_host() 3467 sdhci_do_reset(host, SDHCI_RESET_ALL); sdhci_add_host() 3468 sdhci_writel(host, 0, SDHCI_INT_ENABLE); sdhci_add_host() 3469 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); sdhci_add_host() 3470 free_irq(host->irq, host); sdhci_add_host() 3473 tasklet_kill(&host->finish_tasklet); sdhci_add_host() 3480 void sdhci_remove_host(struct sdhci_host *host, int dead) sdhci_remove_host() argument 3482 struct mmc_host *mmc = host->mmc; sdhci_remove_host() 3486 spin_lock_irqsave(&host->lock, flags); sdhci_remove_host() 3488 host->flags |= SDHCI_DEVICE_DEAD; sdhci_remove_host() 3490 if (host->mrq) { sdhci_remove_host() 3494 host->mrq->cmd->error = -ENOMEDIUM; sdhci_remove_host() 3495 tasklet_schedule(&host->finish_tasklet); sdhci_remove_host() 3498 spin_unlock_irqrestore(&host->lock, flags); sdhci_remove_host() 3501 sdhci_disable_card_detection(host); sdhci_remove_host() 3506 led_classdev_unregister(&host->led); sdhci_remove_host() 3510 sdhci_do_reset(host, SDHCI_RESET_ALL); sdhci_remove_host() 3512 sdhci_writel(host, 0, SDHCI_INT_ENABLE); sdhci_remove_host() 3513 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); sdhci_remove_host() 3514 free_irq(host->irq, host); sdhci_remove_host() 3516 del_timer_sync(&host->timer); sdhci_remove_host() 3518 tasklet_kill(&host->finish_tasklet); sdhci_remove_host() 3523 if (host->adma_table) sdhci_remove_host() 3524 dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, sdhci_remove_host() 3525 host->adma_table, host->adma_addr); sdhci_remove_host() 3526 kfree(host->align_buffer); sdhci_remove_host() 3528 host->adma_table = NULL; sdhci_remove_host() 3529 host->align_buffer = NULL; sdhci_remove_host() 3534 void sdhci_free_host(struct sdhci_host *host) sdhci_free_host() argument 3536 mmc_free_host(host->mmc); sdhci_free_host()
H A D | pxamci.c | 2 * linux/drivers/mmc/host/pxa.c - PXA MMCI driver 28 #include <linux/mmc/host.h> 83 static inline void pxamci_init_ocr(struct pxamci_host *host) pxamci_init_ocr() argument 86 host->vcc = regulator_get_optional(mmc_dev(host->mmc), "vmmc"); pxamci_init_ocr() 88 if (IS_ERR(host->vcc)) pxamci_init_ocr() 89 host->vcc = NULL; pxamci_init_ocr() 91 host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc); pxamci_init_ocr() 92 if (host->pdata && host->pdata->ocr_mask) pxamci_init_ocr() 93 dev_warn(mmc_dev(host->mmc), pxamci_init_ocr() 97 if (host->vcc == NULL) { pxamci_init_ocr() 99 host->mmc->ocr_avail = host->pdata ? pxamci_init_ocr() 100 host->pdata->ocr_mask : pxamci_init_ocr() 105 static inline int pxamci_set_power(struct pxamci_host *host, pxamci_set_power() argument 111 if (host->vcc) { pxamci_set_power() 115 ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); pxamci_set_power() 119 ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0); pxamci_set_power() 124 if (!host->vcc && host->pdata && pxamci_set_power() 125 gpio_is_valid(host->pdata->gpio_power)) { pxamci_set_power() 126 on = ((1 << vdd) & host->pdata->ocr_mask); pxamci_set_power() 127 gpio_set_value(host->pdata->gpio_power, pxamci_set_power() 128 !!on ^ host->pdata->gpio_power_invert); pxamci_set_power() 130 if (!host->vcc && host->pdata && host->pdata->setpower) pxamci_set_power() 131 return host->pdata->setpower(mmc_dev(host->mmc), vdd); pxamci_set_power() 136 static void pxamci_stop_clock(struct pxamci_host *host) pxamci_stop_clock() argument 138 if (readl(host->base + MMC_STAT) & STAT_CLK_EN) { pxamci_stop_clock() 142 writel(STOP_CLOCK, host->base + MMC_STRPCL); pxamci_stop_clock() 145 v = readl(host->base + MMC_STAT); pxamci_stop_clock() 152 dev_err(mmc_dev(host->mmc), "unable to stop clock\n"); pxamci_stop_clock() 156 static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask) pxamci_enable_irq() argument 160 spin_lock_irqsave(&host->lock, flags); pxamci_enable_irq() 161 host->imask &= ~mask; pxamci_enable_irq() 162 writel(host->imask, host->base + MMC_I_MASK); pxamci_enable_irq() 163 spin_unlock_irqrestore(&host->lock, flags); pxamci_enable_irq() 166 static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask) pxamci_disable_irq() argument 170 spin_lock_irqsave(&host->lock, flags); pxamci_disable_irq() 171 host->imask |= mask; pxamci_disable_irq() 172 writel(host->imask, host->base + MMC_I_MASK); pxamci_disable_irq() 173 spin_unlock_irqrestore(&host->lock, flags); pxamci_disable_irq() 176 static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data) pxamci_setup_data() argument 185 host->data = data; pxamci_setup_data() 190 writel(nob, host->base + MMC_NOB); pxamci_setup_data() 191 writel(data->blksz, host->base + MMC_BLKLEN); pxamci_setup_data() 193 clks = (unsigned long long)data->timeout_ns * host->clkrate; pxamci_setup_data() 195 timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt); pxamci_setup_data() 196 writel((timeout + 255) / 256, host->base + MMC_RDTO); pxamci_setup_data() 199 host->dma_dir = DMA_FROM_DEVICE; pxamci_setup_data() 201 DRCMR(host->dma_drcmrtx) = 0; pxamci_setup_data() 202 DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD; pxamci_setup_data() 204 host->dma_dir = DMA_TO_DEVICE; pxamci_setup_data() 206 DRCMR(host->dma_drcmrrx) = 0; pxamci_setup_data() 207 DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD; pxamci_setup_data() 212 host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 
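pxamci_init_ocr() above prefers a "vmmc" regulator for the OCR mask and only falls back to the platform-data mask when no regulator is wired up. A compressed sketch of that decision; the MMC_VDD_32_33 | MMC_VDD_33_34 default is an assumption for the sketch, and unlike the driver the sketch drops the regulator reference instead of keeping it.

#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/host.h>

static void demo_init_ocr(struct mmc_host *mmc, u32 pdata_ocr_mask)
{
	struct regulator *vcc = regulator_get_optional(mmc_dev(mmc), "vmmc");

	if (!IS_ERR(vcc)) {
		mmc->ocr_avail = mmc_regulator_get_ocrmask(vcc);
		regulator_put(vcc);	/* sketch only: reference not kept */
		return;
	}

	mmc->ocr_avail = pdata_ocr_mask ?: MMC_VDD_32_33 | MMC_VDD_33_34;
}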
pxamci_setup_data() 213 host->dma_dir); pxamci_setup_data() 215 for (i = 0; i < host->dma_len; i++) { pxamci_setup_data() 217 host->sg_cpu[i].dcmd = dcmd | length; pxamci_setup_data() 219 host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN; pxamci_setup_data() 224 host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO; pxamci_setup_data() 225 host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]); pxamci_setup_data() 227 host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]); pxamci_setup_data() 228 host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO; pxamci_setup_data() 230 host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) * pxamci_setup_data() 233 host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP; pxamci_setup_data() 242 DALGN |= (1 << host->dma); pxamci_setup_data() 244 DALGN &= ~(1 << host->dma); pxamci_setup_data() 245 DDADR(host->dma) = host->sg_dma; pxamci_setup_data() 254 DCSR(host->dma) = DCSR_RUN; pxamci_setup_data() 257 static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat) pxamci_start_cmd() argument 259 WARN_ON(host->cmd != NULL); pxamci_start_cmd() 260 host->cmd = cmd; pxamci_start_cmd() 280 writel(cmd->opcode, host->base + MMC_CMD); pxamci_start_cmd() 281 writel(cmd->arg >> 16, host->base + MMC_ARGH); pxamci_start_cmd() 282 writel(cmd->arg & 0xffff, host->base + MMC_ARGL); pxamci_start_cmd() 283 writel(cmdat, host->base + MMC_CMDAT); pxamci_start_cmd() 284 writel(host->clkrt, host->base + MMC_CLKRT); pxamci_start_cmd() 286 writel(START_CLOCK, host->base + MMC_STRPCL); pxamci_start_cmd() 288 pxamci_enable_irq(host, END_CMD_RES); pxamci_start_cmd() 291 static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq) pxamci_finish_request() argument 293 host->mrq = NULL; pxamci_finish_request() 294 host->cmd = NULL; pxamci_finish_request() 295 host->data = NULL; pxamci_finish_request() 296 mmc_request_done(host->mmc, mrq); pxamci_finish_request() 299 static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat) pxamci_cmd_done() argument 301 struct mmc_command *cmd = host->cmd; pxamci_cmd_done() 308 host->cmd = NULL; pxamci_cmd_done() 314 v = readl(host->base + MMC_RES) & 0xffff; pxamci_cmd_done() 316 u32 w1 = readl(host->base + MMC_RES) & 0xffff; pxamci_cmd_done() 317 u32 w2 = readl(host->base + MMC_RES) & 0xffff; pxamci_cmd_done() 338 pxamci_disable_irq(host, END_CMD_RES); pxamci_cmd_done() 339 if (host->data && !cmd->error) { pxamci_cmd_done() 340 pxamci_enable_irq(host, DATA_TRAN_DONE); pxamci_cmd_done() 345 if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE) pxamci_cmd_done() 346 DCSR(host->dma) = DCSR_RUN; pxamci_cmd_done() 348 pxamci_finish_request(host, host->mrq); pxamci_cmd_done() 354 static int pxamci_data_done(struct pxamci_host *host, unsigned int stat) pxamci_data_done() argument 356 struct mmc_data *data = host->data; pxamci_data_done() 361 DCSR(host->dma) = 0; pxamci_data_done() 362 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, pxamci_data_done() 363 host->dma_dir); pxamci_data_done() 381 pxamci_disable_irq(host, DATA_TRAN_DONE); pxamci_data_done() 383 host->data = NULL; pxamci_data_done() 384 if (host->mrq->stop) { pxamci_data_done() 385 pxamci_stop_clock(host); pxamci_data_done() 386 pxamci_start_cmd(host, host->mrq->stop, host->cmdat); pxamci_data_done() 388 pxamci_finish_request(host, host->mrq); pxamci_data_done() 396 struct pxamci_host *host = devid; pxamci_irq() local 400 ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK); pxamci_irq() 403 unsigned stat = 
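pxamci_start_cmd() above has to split the 32-bit command argument across the 16-bit MMC_ARGH/MMC_ARGL registers. The trivial worked example below shows the split; the argument value is made up.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t arg = 0x12345678;	/* made-up command argument */

	printf("MMC_ARGH <- 0x%04x\n", (unsigned int)(arg >> 16));	/* 0x1234 */
	printf("MMC_ARGL <- 0x%04x\n", (unsigned int)(arg & 0xffff));	/* 0x5678 */
	return 0;
}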
readl(host->base + MMC_STAT); pxamci_irq() 408 handled |= pxamci_cmd_done(host, stat); pxamci_irq() 410 handled |= pxamci_data_done(host, stat); pxamci_irq() 412 mmc_signal_sdio_irq(host->mmc); pxamci_irq() 422 struct pxamci_host *host = mmc_priv(mmc); pxamci_request() local 425 WARN_ON(host->mrq != NULL); pxamci_request() 427 host->mrq = mrq; pxamci_request() 429 pxamci_stop_clock(host); pxamci_request() 431 cmdat = host->cmdat; pxamci_request() 432 host->cmdat &= ~CMDAT_INIT; pxamci_request() 435 pxamci_setup_data(host, mrq->data); pxamci_request() 446 pxamci_start_cmd(host, mrq->cmd, cmdat); pxamci_request() 451 struct pxamci_host *host = mmc_priv(mmc); pxamci_get_ro() local 453 if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) { pxamci_get_ro() 454 if (host->pdata->gpio_card_ro_invert) pxamci_get_ro() 455 return !gpio_get_value(host->pdata->gpio_card_ro); pxamci_get_ro() 457 return gpio_get_value(host->pdata->gpio_card_ro); pxamci_get_ro() 459 if (host->pdata && host->pdata->get_ro) pxamci_get_ro() 460 return !!host->pdata->get_ro(mmc_dev(mmc)); pxamci_get_ro() 470 struct pxamci_host *host = mmc_priv(mmc); pxamci_set_ios() local 473 unsigned long rate = host->clkrate; pxamci_set_ios() 476 if (host->clkrt == CLKRT_OFF) pxamci_set_ios() 477 clk_prepare_enable(host->clk); pxamci_set_ios() 481 host->clkrt = 7; pxamci_set_ios() 494 host->clkrt = fls(clk) - 1; pxamci_set_ios() 501 pxamci_stop_clock(host); pxamci_set_ios() 502 if (host->clkrt != CLKRT_OFF) { pxamci_set_ios() 503 host->clkrt = CLKRT_OFF; pxamci_set_ios() 504 clk_disable_unprepare(host->clk); pxamci_set_ios() 508 if (host->power_mode != ios->power_mode) { pxamci_set_ios() 511 host->power_mode = ios->power_mode; pxamci_set_ios() 513 ret = pxamci_set_power(host, ios->power_mode, ios->vdd); pxamci_set_ios() 526 host->cmdat |= CMDAT_INIT; pxamci_set_ios() 530 host->cmdat |= CMDAT_SD_4DAT; pxamci_set_ios() 532 host->cmdat &= ~CMDAT_SD_4DAT; pxamci_set_ios() 535 host->clkrt, host->cmdat); pxamci_set_ios() 538 static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable) pxamci_enable_sdio_irq() argument 540 struct pxamci_host *pxa_host = mmc_priv(host); pxamci_enable_sdio_irq() 557 struct pxamci_host *host = devid; pxamci_dma_irq() local 562 writel(BUF_PART_FULL, host->base + MMC_PRTBUF); pxamci_dma_irq() 565 mmc_hostname(host->mmc), dma, dcsr); pxamci_dma_irq() 566 host->data->error = -EIO; pxamci_dma_irq() 567 pxamci_data_done(host, 0); pxamci_dma_irq() 573 struct pxamci_host *host = mmc_priv(devid); pxamci_detect_irq() local 575 mmc_detect_change(devid, msecs_to_jiffies(host->pdata->detect_delay_ms)); pxamci_detect_irq() 626 struct pxamci_host *host = NULL; pxamci_probe() local 672 host = mmc_priv(mmc); pxamci_probe() 673 host->mmc = mmc; pxamci_probe() 674 host->dma = -1; pxamci_probe() 675 host->pdata = pdev->dev.platform_data; pxamci_probe() 676 host->clkrt = CLKRT_OFF; pxamci_probe() 678 host->clk = clk_get(&pdev->dev, NULL); pxamci_probe() 679 if (IS_ERR(host->clk)) { pxamci_probe() 680 ret = PTR_ERR(host->clk); pxamci_probe() 681 host->clk = NULL; pxamci_probe() 685 host->clkrate = clk_get_rate(host->clk); pxamci_probe() 690 mmc->f_min = (host->clkrate + 63) / 64; pxamci_probe() 691 mmc->f_max = (mmc_has_26MHz()) ? 
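The "host->clkrt = fls(clk) - 1" assignment above stores the clock divider as a power-of-two exponent; the reading that CLKRT takes log2 of the divide ratio rather than the ratio itself is inferred from the fls() use, so treat it as an interpretation. A userspace stand-in for fls() makes the encoding visible:

#include <stdio.h>

static int fls_demo(unsigned int x)
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;	/* 1-based index of highest set bit, 0 for x == 0 */
}

int main(void)
{
	unsigned int ratios[] = { 1, 2, 4, 8, 16, 32, 64 };
	unsigned int i;

	for (i = 0; i < sizeof(ratios) / sizeof(ratios[0]); i++)
		printf("divide by %-2u -> CLKRT %d\n",
		       ratios[i], fls_demo(ratios[i]) - 1);
	return 0;
}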
26000000 : host->clkrate; pxamci_probe() 693 pxamci_init_ocr(host); pxamci_probe() 696 host->cmdat = 0; pxamci_probe() 699 host->cmdat |= CMDAT_SDIO_INT_EN; pxamci_probe() 705 host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL); pxamci_probe() 706 if (!host->sg_cpu) { pxamci_probe() 711 spin_lock_init(&host->lock); pxamci_probe() 712 host->res = r; pxamci_probe() 713 host->irq = irq; pxamci_probe() 714 host->imask = MMC_I_MASK_ALL; pxamci_probe() 716 host->base = ioremap(r->start, SZ_4K); pxamci_probe() 717 if (!host->base) { pxamci_probe() 723 * Ensure that the host controller is shut down, and setup pxamci_probe() 726 pxamci_stop_clock(host); pxamci_probe() 727 writel(0, host->base + MMC_SPI); pxamci_probe() 728 writel(64, host->base + MMC_RESTO); pxamci_probe() 729 writel(host->imask, host->base + MMC_I_MASK); pxamci_probe() 731 host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW, pxamci_probe() 732 pxamci_dma_irq, host); pxamci_probe() 733 if (host->dma < 0) { pxamci_probe() 738 ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host); pxamci_probe() 749 host->dma_drcmrrx = dmarx->start; pxamci_probe() 756 host->dma_drcmrtx = dmatx->start; pxamci_probe() 758 if (host->pdata) { pxamci_probe() 759 gpio_cd = host->pdata->gpio_card_detect; pxamci_probe() 760 gpio_ro = host->pdata->gpio_card_ro; pxamci_probe() 761 gpio_power = host->pdata->gpio_power; pxamci_probe() 770 host->pdata->gpio_power_invert); pxamci_probe() 797 if (host->pdata && host->pdata->init) pxamci_probe() 798 host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc); pxamci_probe() 800 if (gpio_is_valid(gpio_power) && host->pdata->setpower) pxamci_probe() 802 if (gpio_is_valid(gpio_ro) && host->pdata->get_ro) pxamci_probe() 816 if (host) { pxamci_probe() 817 if (host->dma >= 0) pxamci_probe() 818 pxa_free_dma(host->dma); pxamci_probe() 819 if (host->base) pxamci_probe() 820 iounmap(host->base); pxamci_probe() 821 if (host->sg_cpu) pxamci_probe() 822 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); pxamci_probe() 823 if (host->clk) pxamci_probe() 824 clk_put(host->clk); pxamci_probe() 838 struct pxamci_host *host = mmc_priv(mmc); pxamci_remove() local 842 if (host->pdata) { pxamci_remove() 843 gpio_cd = host->pdata->gpio_card_detect; pxamci_remove() 844 gpio_ro = host->pdata->gpio_card_ro; pxamci_remove() 845 gpio_power = host->pdata->gpio_power; pxamci_remove() 855 if (host->vcc) pxamci_remove() 856 regulator_put(host->vcc); pxamci_remove() 858 if (host->pdata && host->pdata->exit) pxamci_remove() 859 host->pdata->exit(&pdev->dev, mmc); pxamci_remove() 861 pxamci_stop_clock(host); pxamci_remove() 864 host->base + MMC_I_MASK); pxamci_remove() 866 DRCMR(host->dma_drcmrrx) = 0; pxamci_remove() 867 DRCMR(host->dma_drcmrtx) = 0; pxamci_remove() 869 free_irq(host->irq, host); pxamci_remove() 870 pxa_free_dma(host->dma); pxamci_remove() 871 iounmap(host->base); pxamci_remove() 872 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); pxamci_remove() 874 clk_put(host->clk); pxamci_remove() 876 release_resource(host->res); pxamci_remove()
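The "(host->clkrate + 63) / 64" expression in the probe path above is a ceiling division by the largest divider the PXA controller supports, so mmc->f_min never advertises a rate the hardware cannot reach. A one-line worked example with a made-up input clock:

#include <stdio.h>

int main(void)
{
	unsigned long clkrate = 19500000;	/* 19.5 MHz, made up */

	printf("f_min = %lu Hz\n", (clkrate + 63) / 64);	/* 304688 Hz */
	return 0;
}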
H A D | moxart-mmc.c | 2 * MOXA ART MMC host driver. 25 #include <linux/mmc/host.h> 151 static inline void moxart_init_sg(struct moxart_host *host, moxart_init_sg() argument 154 host->cur_sg = data->sg; moxart_init_sg() 155 host->num_sg = data->sg_len; moxart_init_sg() 156 host->data_remain = host->cur_sg->length; moxart_init_sg() 158 if (host->data_remain > host->data_len) moxart_init_sg() 159 host->data_remain = host->data_len; moxart_init_sg() 162 static inline int moxart_next_sg(struct moxart_host *host) moxart_next_sg() argument 165 struct mmc_data *data = host->mrq->cmd->data; moxart_next_sg() 167 host->cur_sg++; moxart_next_sg() 168 host->num_sg--; moxart_next_sg() 170 if (host->num_sg > 0) { moxart_next_sg() 171 host->data_remain = host->cur_sg->length; moxart_next_sg() 172 remain = host->data_len - data->bytes_xfered; moxart_next_sg() 173 if (remain > 0 && remain < host->data_remain) moxart_next_sg() 174 host->data_remain = remain; moxart_next_sg() 177 return host->num_sg; moxart_next_sg() 180 static int moxart_wait_for_status(struct moxart_host *host, moxart_wait_for_status() argument 187 *status = readl(host->base + REG_STATUS); moxart_wait_for_status() 192 writel(*status & mask, host->base + REG_CLEAR); moxart_wait_for_status() 198 dev_err(mmc_dev(host->mmc), "timed out waiting for status\n"); moxart_wait_for_status() 204 static void moxart_send_command(struct moxart_host *host, moxart_send_command() argument 210 RSP_CRC_FAIL | CMD_SENT, host->base + REG_CLEAR); moxart_send_command() 211 writel(cmd->arg, host->base + REG_ARGUMENT); moxart_send_command() 225 writel(cmdctrl | CMD_EN, host->base + REG_COMMAND); moxart_send_command() 227 if (moxart_wait_for_status(host, MASK_RSP, &status) == -ETIMEDOUT) moxart_send_command() 240 cmd->resp[3] = readl(host->base + REG_RESPONSE0); moxart_send_command() 241 cmd->resp[2] = readl(host->base + REG_RESPONSE1); moxart_send_command() 242 cmd->resp[1] = readl(host->base + REG_RESPONSE2); moxart_send_command() 243 cmd->resp[0] = readl(host->base + REG_RESPONSE3); moxart_send_command() 245 cmd->resp[0] = readl(host->base + REG_RESPONSE0); moxart_send_command() 252 struct moxart_host *host = param; moxart_dma_complete() local 254 complete(&host->dma_complete); moxart_dma_complete() 257 static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host) moxart_transfer_dma() argument 264 if (host->data_len == data->bytes_xfered) moxart_transfer_dma() 268 dma_chan = host->dma_chan_tx; moxart_transfer_dma() 272 dma_chan = host->dma_chan_rx; moxart_transfer_dma() 286 dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n"); moxart_transfer_dma() 290 host->tx_desc = desc; moxart_transfer_dma() 292 desc->callback_param = host; moxart_transfer_dma() 297 data->bytes_xfered += host->data_remain; moxart_transfer_dma() 300 &host->dma_complete, host->timeout); moxart_transfer_dma() 308 static void moxart_transfer_pio(struct moxart_host *host) moxart_transfer_pio() argument 310 struct mmc_data *data = host->mrq->cmd->data; moxart_transfer_pio() 313 if (host->data_len == data->bytes_xfered) moxart_transfer_pio() 316 sgp = sg_virt(host->cur_sg); moxart_transfer_pio() 317 remain = host->data_remain; moxart_transfer_pio() 321 if (moxart_wait_for_status(host, FIFO_URUN, &status) moxart_transfer_pio() 324 complete(&host->pio_complete); moxart_transfer_pio() 327 for (len = 0; len < remain && len < host->fifo_width;) { moxart_transfer_pio() 328 iowrite32(*sgp, host->base + REG_DATA_WINDOW); moxart_transfer_pio() 337 if 
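moxart_wait_for_status() above is a bounded poll on the status register followed by a write-1-to-clear acknowledge of the bits it was waiting for. A minimal sketch of that idiom; the register offsets, retry budget and delay are illustrative, not the MOXA ART values.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define DEMO_REG_STATUS	0x08	/* hypothetical offsets */
#define DEMO_REG_CLEAR	0x20

static int demo_wait_for_status(void __iomem *base, u32 mask, u32 *status)
{
	int retries = 1000;

	while (retries--) {
		*status = readl(base + DEMO_REG_STATUS);
		if (*status & mask) {
			writel(*status & mask, base + DEMO_REG_CLEAR);
			return 0;
		}
		udelay(5);
	}
	return -ETIMEDOUT;
}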
(moxart_wait_for_status(host, FIFO_ORUN, &status) moxart_transfer_pio() 340 complete(&host->pio_complete); moxart_transfer_pio() 343 for (len = 0; len < remain && len < host->fifo_width;) { moxart_transfer_pio() 346 *sgp = ioread32be(host->base + moxart_transfer_pio() 349 *sgp = ioread32(host->base + moxart_transfer_pio() 358 data->bytes_xfered += host->data_remain - remain; moxart_transfer_pio() 359 host->data_remain = remain; moxart_transfer_pio() 361 if (host->data_len != data->bytes_xfered) moxart_transfer_pio() 362 moxart_next_sg(host); moxart_transfer_pio() 364 complete(&host->pio_complete); moxart_transfer_pio() 367 static void moxart_prepare_data(struct moxart_host *host) moxart_prepare_data() argument 369 struct mmc_data *data = host->mrq->cmd->data; moxart_prepare_data() 376 host->data_len = data->blocks * data->blksz; moxart_prepare_data() 380 moxart_init_sg(host, data); moxart_prepare_data() 387 if ((host->data_len > host->fifo_width) && host->have_dma) moxart_prepare_data() 390 writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL); moxart_prepare_data() 391 writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR); moxart_prepare_data() 392 writel(host->rate, host->base + REG_DATA_TIMER); moxart_prepare_data() 393 writel(host->data_len, host->base + REG_DATA_LENGTH); moxart_prepare_data() 394 writel(datactrl, host->base + REG_DATA_CONTROL); moxart_prepare_data() 399 struct moxart_host *host = mmc_priv(mmc); moxart_request() local 403 spin_lock_irqsave(&host->lock, flags); moxart_request() 405 init_completion(&host->dma_complete); moxart_request() 406 init_completion(&host->pio_complete); moxart_request() 408 host->mrq = mrq; moxart_request() 410 if (readl(host->base + REG_STATUS) & CARD_DETECT) { moxart_request() 415 moxart_prepare_data(host); moxart_request() 416 moxart_send_command(host, host->mrq->cmd); moxart_request() 419 if ((host->data_len > host->fifo_width) && host->have_dma) { moxart_request() 421 writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK); moxart_request() 423 spin_unlock_irqrestore(&host->lock, flags); moxart_request() 425 moxart_transfer_dma(mrq->cmd->data, host); moxart_request() 427 spin_lock_irqsave(&host->lock, flags); moxart_request() 430 writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK); moxart_request() 432 spin_unlock_irqrestore(&host->lock, flags); moxart_request() 436 &host->pio_complete, host->timeout); moxart_request() 438 spin_lock_irqsave(&host->lock, flags); moxart_request() 441 if (host->is_removed) { moxart_request() 442 dev_err(mmc_dev(host->mmc), "card removed\n"); moxart_request() 447 if (moxart_wait_for_status(host, MASK_DATA, &status) moxart_request() 457 moxart_send_command(host, mrq->cmd->data->stop); moxart_request() 461 spin_unlock_irqrestore(&host->lock, flags); moxart_request() 462 mmc_request_done(host->mmc, mrq); moxart_request() 467 struct moxart_host *host = (struct moxart_host *)devid; moxart_irq() local 471 spin_lock_irqsave(&host->lock, flags); moxart_irq() 473 status = readl(host->base + REG_STATUS); moxart_irq() 475 host->is_removed = status & CARD_DETECT; moxart_irq() 476 if (host->is_removed && host->have_dma) { moxart_irq() 477 dmaengine_terminate_all(host->dma_chan_tx); moxart_irq() 478 dmaengine_terminate_all(host->dma_chan_rx); moxart_irq() 480 host->mrq = NULL; moxart_irq() 481 writel(MASK_INTR_PIO, host->base + REG_CLEAR); moxart_irq() 482 writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK); moxart_irq() 483 mmc_detect_change(host->mmc, 0); moxart_irq() 485 if (status & (FIFO_ORUN | 
FIFO_URUN) && host->mrq) moxart_irq() 486 moxart_transfer_pio(host); moxart_irq() 488 spin_unlock_irqrestore(&host->lock, flags); moxart_irq() 495 struct moxart_host *host = mmc_priv(mmc); moxart_set_ios() local 500 spin_lock_irqsave(&host->lock, flags); moxart_set_ios() 504 if (ios->clock >= host->sysclk / (2 * (div + 1))) moxart_set_ios() 508 host->rate = host->sysclk / (2 * (div + 1)); moxart_set_ios() 509 if (host->rate > host->sysclk) moxart_set_ios() 511 writel(ctrl, host->base + REG_CLOCK_CONTROL); moxart_set_ios() 515 writel(readl(host->base + REG_POWER_CONTROL) & ~SD_POWER_ON, moxart_set_ios() 516 host->base + REG_POWER_CONTROL); moxart_set_ios() 524 host->base + REG_POWER_CONTROL); moxart_set_ios() 529 writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH); moxart_set_ios() 532 writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH); moxart_set_ios() 535 writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH); moxart_set_ios() 539 spin_unlock_irqrestore(&host->lock, flags); moxart_set_ios() 545 struct moxart_host *host = mmc_priv(mmc); moxart_get_ro() local 547 return !!(readl(host->base + REG_STATUS) & WRITE_PROT); moxart_get_ro() 562 struct moxart_host *host = NULL; moxart_probe() local 605 host = mmc_priv(mmc); moxart_probe() 606 host->mmc = mmc; moxart_probe() 607 host->base = reg_mmc; moxart_probe() 608 host->reg_phys = res_mmc.start; moxart_probe() 609 host->timeout = msecs_to_jiffies(1000); moxart_probe() 610 host->sysclk = clk_get_rate(clk); moxart_probe() 611 host->fifo_width = readl(host->base + REG_FEATURE) << 2; moxart_probe() 612 host->dma_chan_tx = dma_request_slave_channel_reason(dev, "tx"); moxart_probe() 613 host->dma_chan_rx = dma_request_slave_channel_reason(dev, "rx"); moxart_probe() 615 spin_lock_init(&host->lock); moxart_probe() 618 mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2); moxart_probe() 619 mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2); moxart_probe() 622 if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) { moxart_probe() 623 if (PTR_ERR(host->dma_chan_tx) == -EPROBE_DEFER || moxart_probe() 624 PTR_ERR(host->dma_chan_rx) == -EPROBE_DEFER) { moxart_probe() 629 host->have_dma = false; moxart_probe() 632 host->dma_chan_tx, host->dma_chan_rx); moxart_probe() 633 host->have_dma = true; moxart_probe() 640 cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW; moxart_probe() 641 dmaengine_slave_config(host->dma_chan_tx, &cfg); moxart_probe() 644 cfg.src_addr = host->reg_phys + REG_DATA_WINDOW; moxart_probe() 646 dmaengine_slave_config(host->dma_chan_rx, &cfg); moxart_probe() 649 switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) { moxart_probe() 660 writel(0, host->base + REG_INTERRUPT_MASK); moxart_probe() 662 writel(CMD_SDC_RESET, host->base + REG_COMMAND); moxart_probe() 664 if (!(readl(host->base + REG_COMMAND) & CMD_SDC_RESET)) moxart_probe() 669 ret = devm_request_irq(dev, irq, moxart_irq, 0, "moxart-mmc", host); moxart_probe() 676 dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width); moxart_probe() 689 struct moxart_host *host = mmc_priv(mmc); moxart_remove() local 694 if (!IS_ERR(host->dma_chan_tx)) moxart_remove() 695 dma_release_channel(host->dma_chan_tx); moxart_remove() 696 if (!IS_ERR(host->dma_chan_rx)) moxart_remove() 697 dma_release_channel(host->dma_chan_rx); moxart_remove() 701 writel(0, host->base + REG_INTERRUPT_MASK); moxart_remove() 702 writel(0, host->base + REG_POWER_CONTROL); moxart_remove() 703 writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF, moxart_remove() 704 host->base + REG_CLOCK_CONTROL); moxart_remove()
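The divider loop in moxart_set_ios() above walks div upward until sysclk / (2 * (div + 1)) first drops to or below the requested clock, then records the rate actually achieved. The worked example below runs the same search in userspace; the 0x7f limit stands in for CLK_DIV_MASK and is an assumption.

#include <stdio.h>

int main(void)
{
	unsigned long sysclk = 100000000;	/* 100 MHz controller clock, made up */
	unsigned long target = 25000000;	/* 25 MHz card clock */
	unsigned int div, mask = 0x7f;		/* assumed CLK_DIV_MASK */

	for (div = 0; div < mask; div++)
		if (target >= sysclk / (2 * (div + 1)))
			break;

	printf("div=%u -> actual %lu Hz\n", div, sysclk / (2 * (div + 1)));
	return 0;
}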
H A D | usdhi6rol0.c | 19 #include <linux/mmc/host.h> 205 static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data) usdhi6_write() argument 207 iowrite32(data, host->base + reg); usdhi6_write() 208 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, usdhi6_write() 209 host->base, reg, data); usdhi6_write() 212 static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data) usdhi6_write16() argument 214 iowrite16(data, host->base + reg); usdhi6_write16() 215 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, usdhi6_write16() 216 host->base, reg, data); usdhi6_write16() 219 static u32 usdhi6_read(struct usdhi6_host *host, u32 reg) usdhi6_read() argument 221 u32 data = ioread32(host->base + reg); usdhi6_read() 222 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, usdhi6_read() 223 host->base, reg, data); usdhi6_read() 227 static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg) usdhi6_read16() argument 229 u16 data = ioread16(host->base + reg); usdhi6_read16() 230 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, usdhi6_read16() 231 host->base, reg, data); usdhi6_read16() 235 static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2) usdhi6_irq_enable() argument 237 host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1; usdhi6_irq_enable() 238 host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2; usdhi6_irq_enable() 239 usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask); usdhi6_irq_enable() 240 usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask); usdhi6_irq_enable() 243 static void usdhi6_wait_for_resp(struct usdhi6_host *host) usdhi6_wait_for_resp() argument 245 usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END | usdhi6_wait_for_resp() 250 static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read) usdhi6_wait_for_brwe() argument 252 usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END | usdhi6_wait_for_brwe() 257 static void usdhi6_only_cd(struct usdhi6_host *host) usdhi6_only_cd() argument 260 usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0); usdhi6_only_cd() 263 static void usdhi6_mask_all(struct usdhi6_host *host) usdhi6_mask_all() argument 265 usdhi6_irq_enable(host, 0, 0); usdhi6_mask_all() 268 static int usdhi6_error_code(struct usdhi6_host *host) usdhi6_error_code() argument 272 usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP); usdhi6_error_code() 274 if (host->io_error & usdhi6_error_code() 276 u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54); usdhi6_error_code() 277 int opc = host->mrq ? host->mrq->cmd->opcode : -1; usdhi6_error_code() 279 err = usdhi6_read(host, USDHI6_SD_ERR_STS2); usdhi6_error_code() 281 if (host->wait == USDHI6_WAIT_FOR_CMD) usdhi6_error_code() 282 dev_dbg(mmc_dev(host->mmc), usdhi6_error_code() 284 err, rsp54, host->wait, opc); usdhi6_error_code() 286 dev_warn(mmc_dev(host->mmc), usdhi6_error_code() 288 err, rsp54, host->wait, opc); usdhi6_error_code() 292 err = usdhi6_read(host, USDHI6_SD_ERR_STS1); usdhi6_error_code() 294 dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n", usdhi6_error_code() 295 err, host->wait, host->mrq ? 
host->mrq->cmd->opcode : -1); usdhi6_error_code() 296 if (host->io_error & USDHI6_SD_INFO2_ILA) usdhi6_error_code() 310 static void usdhi6_blk_bounce(struct usdhi6_host *host, usdhi6_blk_bounce() argument 313 struct mmc_data *data = host->mrq->data; usdhi6_blk_bounce() 314 size_t blk_head = host->head_len; usdhi6_blk_bounce() 316 dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n", usdhi6_blk_bounce() 317 __func__, host->mrq->cmd->opcode, data->sg_len, usdhi6_blk_bounce() 320 host->head_pg.page = host->pg.page; usdhi6_blk_bounce() 321 host->head_pg.mapped = host->pg.mapped; usdhi6_blk_bounce() 322 host->pg.page = nth_page(host->pg.page, 1); usdhi6_blk_bounce() 323 host->pg.mapped = kmap(host->pg.page); usdhi6_blk_bounce() 325 host->blk_page = host->bounce_buf; usdhi6_blk_bounce() 326 host->offset = 0; usdhi6_blk_bounce() 331 memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head, usdhi6_blk_bounce() 333 memcpy(host->bounce_buf + blk_head, host->pg.mapped, usdhi6_blk_bounce() 338 static void usdhi6_sg_prep(struct usdhi6_host *host) usdhi6_sg_prep() argument 340 struct mmc_request *mrq = host->mrq; usdhi6_sg_prep() 343 usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks); usdhi6_sg_prep() 345 host->sg = data->sg; usdhi6_sg_prep() 347 host->offset = host->sg->offset; usdhi6_sg_prep() 351 static void *usdhi6_sg_map(struct usdhi6_host *host) usdhi6_sg_map() argument 353 struct mmc_data *data = host->mrq->data; usdhi6_sg_map() 354 struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg; usdhi6_sg_map() 358 WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page); usdhi6_sg_map() 364 host->pg.page = sg_page(sg); usdhi6_sg_map() 365 host->pg.mapped = kmap(host->pg.page); usdhi6_sg_map() 366 host->offset = sg->offset; usdhi6_sg_map() 372 host->head_len = blk_head; usdhi6_sg_map() 379 usdhi6_blk_bounce(host, sg); usdhi6_sg_map() 381 host->blk_page = host->pg.mapped; usdhi6_sg_map() 383 dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n", usdhi6_sg_map() 384 host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped, usdhi6_sg_map() 385 sg->offset, host->mrq->cmd->opcode, host->mrq); usdhi6_sg_map() 387 return host->blk_page + host->offset; usdhi6_sg_map() 391 static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force) usdhi6_sg_unmap() argument 393 struct mmc_data *data = host->mrq->data; usdhi6_sg_unmap() 394 struct page *page = host->head_pg.page; usdhi6_sg_unmap() 399 host->sg : data->sg; usdhi6_sg_unmap() 400 size_t blk_head = host->head_len; usdhi6_sg_unmap() 403 memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head, usdhi6_sg_unmap() 404 host->bounce_buf, blk_head); usdhi6_sg_unmap() 405 memcpy(host->pg.mapped, host->bounce_buf + blk_head, usdhi6_sg_unmap() 412 host->head_pg.page = NULL; usdhi6_sg_unmap() 415 (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head) usdhi6_sg_unmap() 420 page = host->pg.page; usdhi6_sg_unmap() 427 host->pg.page = NULL; usdhi6_sg_unmap() 431 static void usdhi6_sg_advance(struct usdhi6_host *host) usdhi6_sg_advance() argument 433 struct mmc_data *data = host->mrq->data; usdhi6_sg_advance() 437 if (host->head_pg.page) { usdhi6_sg_advance() 439 host->page_idx++; usdhi6_sg_advance() 440 host->offset = data->blksz - host->head_len; usdhi6_sg_advance() 441 host->blk_page = host->pg.mapped; usdhi6_sg_advance() 442 usdhi6_sg_unmap(host, false); usdhi6_sg_advance() 444 host->offset += data->blksz; usdhi6_sg_advance() 446 if (host->offset == PAGE_SIZE) { usdhi6_sg_advance() 448 host->offset = 0; 
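usdhi6_blk_bounce() above handles a block whose bytes straddle a page boundary by gathering the tail of one mapped page and the head of the next into a contiguous bounce buffer, so the PIO code can treat the block as flat memory. The userspace model below shows only the gather step, with page and block sizes scaled down to keep it small.

#include <stdio.h>
#include <string.h>

#define DEMO_PAGE	16
#define DEMO_BLKSZ	8

int main(void)
{
	char page_a[DEMO_PAGE], page_b[DEMO_PAGE], bounce[DEMO_BLKSZ];
	size_t blk_head = 3;	/* bytes of the block that sit in page_a */

	memset(page_a, 'A', sizeof(page_a));
	memset(page_b, 'B', sizeof(page_b));

	/* gather: end of page_a, then beginning of page_b */
	memcpy(bounce, page_a + DEMO_PAGE - blk_head, blk_head);
	memcpy(bounce + blk_head, page_b, DEMO_BLKSZ - blk_head);

	printf("bounce = %.8s\n", bounce);	/* prints AAABBBBB */
	return 0;
}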
usdhi6_sg_advance() 449 host->page_idx++; usdhi6_sg_advance() 454 * Now host->blk_page + host->offset point at the end of our last block usdhi6_sg_advance() 455 * and host->page_idx is the index of the page, in which our new block usdhi6_sg_advance() 459 done = (host->page_idx << PAGE_SHIFT) + host->offset; usdhi6_sg_advance() 460 total = host->sg->offset + sg_dma_len(host->sg); usdhi6_sg_advance() 462 dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__, usdhi6_sg_advance() 463 done, total, host->offset); usdhi6_sg_advance() 465 if (done < total && host->offset) { usdhi6_sg_advance() 467 if (host->offset + data->blksz > PAGE_SIZE) usdhi6_sg_advance() 469 usdhi6_blk_bounce(host, host->sg); usdhi6_sg_advance() 475 usdhi6_sg_unmap(host, false); usdhi6_sg_advance() 483 struct scatterlist *next = sg_next(host->sg); usdhi6_sg_advance() 485 host->page_idx = 0; usdhi6_sg_advance() 488 host->wait = USDHI6_WAIT_FOR_DATA_END; usdhi6_sg_advance() 489 host->sg = next; usdhi6_sg_advance() 502 host->pg.page = nth_page(sg_page(host->sg), host->page_idx); usdhi6_sg_advance() 503 host->pg.mapped = kmap(host->pg.page); usdhi6_sg_advance() 504 host->blk_page = host->pg.mapped; usdhi6_sg_advance() 506 dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n", usdhi6_sg_advance() 507 host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped, usdhi6_sg_advance() 508 host->mrq->cmd->opcode, host->mrq); usdhi6_sg_advance() 513 static void usdhi6_dma_release(struct usdhi6_host *host) usdhi6_dma_release() argument 515 host->dma_active = false; usdhi6_dma_release() 516 if (host->chan_tx) { usdhi6_dma_release() 517 struct dma_chan *chan = host->chan_tx; usdhi6_dma_release() 518 host->chan_tx = NULL; usdhi6_dma_release() 521 if (host->chan_rx) { usdhi6_dma_release() 522 struct dma_chan *chan = host->chan_rx; usdhi6_dma_release() 523 host->chan_rx = NULL; usdhi6_dma_release() 528 static void usdhi6_dma_stop_unmap(struct usdhi6_host *host) usdhi6_dma_stop_unmap() argument 530 struct mmc_data *data = host->mrq->data; usdhi6_dma_stop_unmap() 532 if (!host->dma_active) usdhi6_dma_stop_unmap() 535 usdhi6_write(host, USDHI6_CC_EXT_MODE, 0); usdhi6_dma_stop_unmap() 536 host->dma_active = false; usdhi6_dma_stop_unmap() 539 dma_unmap_sg(host->chan_rx->device->dev, data->sg, usdhi6_dma_stop_unmap() 542 dma_unmap_sg(host->chan_tx->device->dev, data->sg, usdhi6_dma_stop_unmap() 548 struct usdhi6_host *host = arg; usdhi6_dma_complete() local 549 struct mmc_request *mrq = host->mrq; usdhi6_dma_complete() 552 dev_name(mmc_dev(host->mmc)), mrq)) usdhi6_dma_complete() 555 dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__, usdhi6_dma_complete() 558 usdhi6_dma_stop_unmap(host); usdhi6_dma_complete() 559 usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ); usdhi6_dma_complete() 562 static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan, usdhi6_dma_setup() argument 565 struct mmc_data *data = host->mrq->data; usdhi6_dma_setup() 585 host->dma_active = true; usdhi6_dma_setup() 592 desc->callback_param = host; usdhi6_dma_setup() 596 dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n", usdhi6_dma_setup() 603 usdhi6_dma_release(host); usdhi6_dma_setup() 604 dev_warn(mmc_dev(host->mmc), usdhi6_dma_setup() 611 static int usdhi6_dma_start(struct usdhi6_host *host) usdhi6_dma_start() argument 613 if (!host->chan_rx || !host->chan_tx) usdhi6_dma_start() 616 if (host->mrq->data->flags & MMC_DATA_READ) usdhi6_dma_start() 617 return usdhi6_dma_setup(host, 
host->chan_rx, DMA_DEV_TO_MEM); usdhi6_dma_start() 619 return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV); usdhi6_dma_start() 622 static void usdhi6_dma_kill(struct usdhi6_host *host) usdhi6_dma_kill() argument 624 struct mmc_data *data = host->mrq->data; usdhi6_dma_kill() 626 dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n", usdhi6_dma_kill() 630 dmaengine_terminate_all(host->chan_rx); usdhi6_dma_kill() 632 dmaengine_terminate_all(host->chan_tx); usdhi6_dma_kill() 635 static void usdhi6_dma_check_error(struct usdhi6_host *host) usdhi6_dma_check_error() argument 637 struct mmc_data *data = host->mrq->data; usdhi6_dma_check_error() 639 dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n", usdhi6_dma_check_error() 640 __func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1)); usdhi6_dma_check_error() 642 if (host->io_error) { usdhi6_dma_check_error() 643 data->error = usdhi6_error_code(host); usdhi6_dma_check_error() 645 usdhi6_dma_kill(host); usdhi6_dma_check_error() 646 usdhi6_dma_release(host); usdhi6_dma_check_error() 647 dev_warn(mmc_dev(host->mmc), usdhi6_dma_check_error() 657 if (host->irq_status & USDHI6_SD_INFO1_RSP_END) usdhi6_dma_check_error() 658 dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n"); usdhi6_dma_check_error() 661 static void usdhi6_dma_kick(struct usdhi6_host *host) usdhi6_dma_kick() argument 663 if (host->mrq->data->flags & MMC_DATA_READ) usdhi6_dma_kick() 664 dma_async_issue_pending(host->chan_rx); usdhi6_dma_kick() 666 dma_async_issue_pending(host->chan_tx); usdhi6_dma_kick() 669 static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start) usdhi6_dma_request() argument 677 host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx"); usdhi6_dma_request() 678 dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__, usdhi6_dma_request() 679 host->chan_tx); usdhi6_dma_request() 681 if (!host->chan_tx) usdhi6_dma_request() 688 ret = dmaengine_slave_config(host->chan_tx, &cfg); usdhi6_dma_request() 692 host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx"); usdhi6_dma_request() 693 dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__, usdhi6_dma_request() 694 host->chan_rx); usdhi6_dma_request() 696 if (!host->chan_rx) usdhi6_dma_request() 703 ret = dmaengine_slave_config(host->chan_rx, &cfg); usdhi6_dma_request() 710 dma_release_channel(host->chan_rx); usdhi6_dma_request() 711 host->chan_rx = NULL; usdhi6_dma_request() 713 dma_release_channel(host->chan_tx); usdhi6_dma_request() 714 host->chan_tx = NULL; usdhi6_dma_request() 719 static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios) usdhi6_clk_set() argument 726 if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN) usdhi6_clk_set() 732 dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n"); usdhi6_clk_set() 736 val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK; usdhi6_clk_set() 741 if (host->imclk <= rate) { usdhi6_clk_set() 744 new_rate = host->imclk; usdhi6_clk_set() 747 new_rate = host->imclk / 2; usdhi6_clk_set() 751 roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate)); usdhi6_clk_set() 753 new_rate = host->imclk / div; usdhi6_clk_set() 756 if (host->rate == new_rate) usdhi6_clk_set() 759 host->rate = new_rate; usdhi6_clk_set() 761 dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n", usdhi6_clk_set() 769 if (host->imclk == rate || host->imclk == host->rate || !rate) usdhi6_clk_set() 770 usdhi6_write(host, USDHI6_SD_CLK_CTRL, usdhi6_clk_set() 774 
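usdhi6_clk_set() above chooses the divider as DIV_ROUND_UP(imclk, rate) rounded up to the next power of two, since the clock control field only offers power-of-two dividers. A worked example with made-up clock numbers:

#include <stdio.h>

static unsigned long roundup_pow2(unsigned long x)
{
	unsigned long p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long imclk = 156000000;	/* 156 MHz module clock, made up */
	unsigned long rate = 25000000;		/* 25 MHz requested */
	unsigned long div = roundup_pow2((imclk + rate - 1) / rate);

	printf("div=%lu -> %lu Hz\n", div, imclk / div);	/* div=8 -> 19500000 Hz */
	return 0;
}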
host->rate = 0; usdhi6_clk_set() 778 usdhi6_write(host, USDHI6_SD_CLK_CTRL, val); usdhi6_clk_set() 780 if (host->imclk == rate || host->imclk == host->rate || usdhi6_clk_set() 782 usdhi6_write(host, USDHI6_SD_CLK_CTRL, usdhi6_clk_set() 786 static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios) usdhi6_set_power() argument 788 struct mmc_host *mmc = host->mmc; usdhi6_set_power() 796 static int usdhi6_reset(struct usdhi6_host *host) usdhi6_reset() argument 800 usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED); usdhi6_reset() 802 usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET); usdhi6_reset() 804 if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET) usdhi6_reset() 812 struct usdhi6_host *host = mmc_priv(mmc); usdhi6_set_ios() local 821 usdhi6_set_power(host, ios); usdhi6_set_ios() 822 usdhi6_only_cd(host); usdhi6_set_ios() 829 ret = usdhi6_reset(host); usdhi6_set_ios() 833 usdhi6_set_power(host, ios); usdhi6_set_ios() 834 usdhi6_only_cd(host); usdhi6_set_ios() 838 option = usdhi6_read(host, USDHI6_SD_OPTION); usdhi6_set_ios() 854 usdhi6_write(host, USDHI6_SD_OPTION, option); usdhi6_set_ios() 855 usdhi6_write(host, USDHI6_SDIF_MODE, mode); usdhi6_set_ios() 859 if (host->rate != ios->clock) usdhi6_set_ios() 860 usdhi6_clk_set(host, ios); usdhi6_set_ios() 864 static void usdhi6_timeout_set(struct usdhi6_host *host) usdhi6_timeout_set() argument 866 struct mmc_request *mrq = host->mrq; usdhi6_timeout_set() 871 ticks = host->rate / 1000 * mrq->cmd->busy_timeout; usdhi6_timeout_set() 873 ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) + usdhi6_timeout_set() 885 dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n", usdhi6_timeout_set() 886 mrq->data ? "data" : "cmd", ticks, host->rate); usdhi6_timeout_set() 889 usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) | usdhi6_timeout_set() 890 (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK)); usdhi6_timeout_set() 893 static void usdhi6_request_done(struct usdhi6_host *host) usdhi6_request_done() argument 895 struct mmc_request *mrq = host->mrq; usdhi6_request_done() 898 if (WARN(host->pg.page || host->head_pg.page, usdhi6_request_done() 900 host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode, usdhi6_request_done() 902 data ? host->offset : 0, data ? 
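usdhi6_timeout_set() above converts the request's timeout into SD-clock ticks before choosing a register value: a busy timeout arrives in milliseconds, a data timeout in nanoseconds. The worked example below evaluates only the two conversions visible in the listing, with made-up numbers (the expression for the data case continues past what the listing shows, so the extra term is omitted here).

#include <stdio.h>

int main(void)
{
	unsigned long rate = 25000000;		/* 25 MHz SD clock */
	unsigned int busy_timeout_ms = 500;
	unsigned int timeout_ns = 100000000;	/* 100 ms expressed in ns */

	printf("busy: %lu ticks\n", rate / 1000 * busy_timeout_ms);
	printf("data: %lu ticks\n", rate / 1000000 * (timeout_ns / 1000));
	return 0;
}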
data->blocks : 0, usdhi6_request_done() 904 usdhi6_sg_unmap(host, true); usdhi6_request_done() 909 dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n", usdhi6_request_done() 917 usdhi6_write(host, USDHI6_CC_EXT_MODE, 0); usdhi6_request_done() 918 host->wait = USDHI6_WAIT_FOR_REQUEST; usdhi6_request_done() 919 host->mrq = NULL; usdhi6_request_done() 921 mmc_request_done(host->mmc, mrq); usdhi6_request_done() 924 static int usdhi6_cmd_flags(struct usdhi6_host *host) usdhi6_cmd_flags() argument 926 struct mmc_request *mrq = host->mrq; usdhi6_cmd_flags() 930 if (host->app_cmd) { usdhi6_cmd_flags() 931 host->app_cmd = false; usdhi6_cmd_flags() 967 dev_warn(mmc_dev(host->mmc), usdhi6_cmd_flags() 977 static int usdhi6_rq_start(struct usdhi6_host *host) usdhi6_rq_start() argument 979 struct mmc_request *mrq = host->mrq; usdhi6_rq_start() 982 int opc = usdhi6_cmd_flags(host); usdhi6_rq_start() 989 if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY)) usdhi6_rq_start() 995 dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n"); usdhi6_rq_start() 1003 host->page_idx = 0; usdhi6_rq_start() 1026 dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n", usdhi6_rq_start() 1035 usdhi6_sg_prep(host); usdhi6_rq_start() 1037 usdhi6_write(host, USDHI6_SD_SIZE, data->blksz); usdhi6_rq_start() 1043 dev_dbg(mmc_dev(host->mmc), usdhi6_rq_start() 1050 usdhi6_dma_start(host) >= DMA_MIN_COOKIE; usdhi6_rq_start() 1053 usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW); usdhi6_rq_start() 1055 dev_dbg(mmc_dev(host->mmc), usdhi6_rq_start() 1062 dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n", usdhi6_rq_start() 1067 usdhi6_wait_for_resp(host); usdhi6_rq_start() 1069 host->wait = USDHI6_WAIT_FOR_CMD; usdhi6_rq_start() 1070 schedule_delayed_work(&host->timeout_work, host->timeout); usdhi6_rq_start() 1073 usdhi6_write(host, USDHI6_SD_STOP, usdhi6_rq_start() 1075 usdhi6_write(host, USDHI6_SD_ARG, cmd->arg); usdhi6_rq_start() 1078 usdhi6_write(host, USDHI6_SD_CMD, opc); usdhi6_rq_start() 1085 struct usdhi6_host *host = mmc_priv(mmc); usdhi6_request() local 1088 cancel_delayed_work_sync(&host->timeout_work); usdhi6_request() 1090 host->mrq = mrq; usdhi6_request() 1091 host->sg = NULL; usdhi6_request() 1093 usdhi6_timeout_set(host); usdhi6_request() 1094 ret = usdhi6_rq_start(host); usdhi6_request() 1097 usdhi6_request_done(host); usdhi6_request() 1103 struct usdhi6_host *host = mmc_priv(mmc); usdhi6_get_cd() local 1105 u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD; usdhi6_get_cd() 1119 struct usdhi6_host *host = mmc_priv(mmc); usdhi6_get_ro() local 1121 u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP; usdhi6_get_ro() 1135 struct usdhi6_host *host = mmc_priv(mmc); usdhi6_enable_sdio_irq() local 1140 host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ; usdhi6_enable_sdio_irq() 1141 usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask); usdhi6_enable_sdio_irq() 1142 usdhi6_write(host, USDHI6_SDIO_MODE, 1); usdhi6_enable_sdio_irq() 1144 usdhi6_write(host, USDHI6_SDIO_MODE, 0); usdhi6_enable_sdio_irq() 1145 usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ); usdhi6_enable_sdio_irq() 1146 host->sdio_mask = USDHI6_SDIO_INFO1_IRQ; usdhi6_enable_sdio_irq() 1160 static void usdhi6_resp_cmd12(struct usdhi6_host *host) usdhi6_resp_cmd12() argument 1162 struct mmc_command *cmd = host->mrq->stop; usdhi6_resp_cmd12() 1163 cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10); usdhi6_resp_cmd12() 1166 static 
void usdhi6_resp_read(struct usdhi6_host *host) usdhi6_resp_read() argument 1168 struct mmc_command *cmd = host->mrq->cmd; usdhi6_resp_read() 1189 if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) { usdhi6_resp_read() 1190 dev_err(mmc_dev(host->mmc), usdhi6_resp_read() 1199 tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8); usdhi6_resp_read() 1205 rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54); usdhi6_resp_read() 1207 rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10); usdhi6_resp_read() 1209 dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]); usdhi6_resp_read() 1212 static int usdhi6_blk_read(struct usdhi6_host *host) usdhi6_blk_read() argument 1214 struct mmc_data *data = host->mrq->data; usdhi6_blk_read() 1218 if (host->io_error) { usdhi6_blk_read() 1219 data->error = usdhi6_error_code(host); usdhi6_blk_read() 1223 if (host->pg.page) { usdhi6_blk_read() 1224 p = host->blk_page + host->offset; usdhi6_blk_read() 1226 p = usdhi6_sg_map(host); usdhi6_blk_read() 1234 *p = usdhi6_read(host, USDHI6_SD_BUF0); usdhi6_blk_read() 1238 u16 d = usdhi6_read16(host, USDHI6_SD_BUF0); usdhi6_blk_read() 1247 dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error); usdhi6_blk_read() 1248 host->wait = USDHI6_WAIT_FOR_REQUEST; usdhi6_blk_read() 1252 static int usdhi6_blk_write(struct usdhi6_host *host) usdhi6_blk_write() argument 1254 struct mmc_data *data = host->mrq->data; usdhi6_blk_write() 1258 if (host->io_error) { usdhi6_blk_write() 1259 data->error = usdhi6_error_code(host); usdhi6_blk_write() 1263 if (host->pg.page) { usdhi6_blk_write() 1264 p = host->blk_page + host->offset; usdhi6_blk_write() 1266 p = usdhi6_sg_map(host); usdhi6_blk_write() 1274 usdhi6_write(host, USDHI6_SD_BUF0, *p); usdhi6_blk_write() 1284 usdhi6_write16(host, USDHI6_SD_BUF0, d); usdhi6_blk_write() 1290 dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error); usdhi6_blk_write() 1291 host->wait = USDHI6_WAIT_FOR_REQUEST; usdhi6_blk_write() 1295 static int usdhi6_stop_cmd(struct usdhi6_host *host) usdhi6_stop_cmd() argument 1297 struct mmc_request *mrq = host->mrq; usdhi6_stop_cmd() 1303 host->wait = USDHI6_WAIT_FOR_STOP; usdhi6_stop_cmd() 1308 dev_err(mmc_dev(host->mmc), usdhi6_stop_cmd() 1317 static bool usdhi6_end_cmd(struct usdhi6_host *host) usdhi6_end_cmd() argument 1319 struct mmc_request *mrq = host->mrq; usdhi6_end_cmd() 1322 if (host->io_error) { usdhi6_end_cmd() 1323 cmd->error = usdhi6_error_code(host); usdhi6_end_cmd() 1327 usdhi6_resp_read(host); usdhi6_end_cmd() 1332 if (host->dma_active) { usdhi6_end_cmd() 1333 usdhi6_dma_kick(host); usdhi6_end_cmd() 1335 host->wait = USDHI6_WAIT_FOR_DMA; usdhi6_end_cmd() 1336 else if (usdhi6_stop_cmd(host) < 0) usdhi6_end_cmd() 1342 host->wait = USDHI6_WAIT_FOR_MREAD; usdhi6_end_cmd() 1344 host->wait = USDHI6_WAIT_FOR_READ; usdhi6_end_cmd() 1349 host->wait = USDHI6_WAIT_FOR_MWRITE; usdhi6_end_cmd() 1351 host->wait = USDHI6_WAIT_FOR_WRITE; usdhi6_end_cmd() 1357 static bool usdhi6_read_block(struct usdhi6_host *host) usdhi6_read_block() argument 1360 int ret = usdhi6_blk_read(host); usdhi6_read_block() 1364 * cross-page, in which case for single-block IO host->page_idx == 0. 
usdhi6_read_block() 1367 usdhi6_sg_unmap(host, true); usdhi6_read_block() 1372 host->wait = USDHI6_WAIT_FOR_DATA_END; usdhi6_read_block() 1376 static bool usdhi6_mread_block(struct usdhi6_host *host) usdhi6_mread_block() argument 1378 int ret = usdhi6_blk_read(host); usdhi6_mread_block() 1383 usdhi6_sg_advance(host); usdhi6_mread_block() 1385 return !host->mrq->data->error && usdhi6_mread_block() 1386 (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop); usdhi6_mread_block() 1389 static bool usdhi6_write_block(struct usdhi6_host *host) usdhi6_write_block() argument 1391 int ret = usdhi6_blk_write(host); usdhi6_write_block() 1394 usdhi6_sg_unmap(host, true); usdhi6_write_block() 1399 host->wait = USDHI6_WAIT_FOR_DATA_END; usdhi6_write_block() 1403 static bool usdhi6_mwrite_block(struct usdhi6_host *host) usdhi6_mwrite_block() argument 1405 int ret = usdhi6_blk_write(host); usdhi6_mwrite_block() 1410 usdhi6_sg_advance(host); usdhi6_mwrite_block() 1412 return !host->mrq->data->error && usdhi6_mwrite_block() 1413 (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop); usdhi6_mwrite_block() 1420 struct usdhi6_host *host = dev_id; usdhi6_sd_bh() local 1426 cancel_delayed_work_sync(&host->timeout_work); usdhi6_sd_bh() 1428 mrq = host->mrq; usdhi6_sd_bh() 1435 switch (host->wait) { usdhi6_sd_bh() 1441 io_wait = usdhi6_end_cmd(host); usdhi6_sd_bh() 1445 io_wait = usdhi6_mread_block(host); usdhi6_sd_bh() 1449 io_wait = usdhi6_read_block(host); usdhi6_sd_bh() 1453 io_wait = usdhi6_mwrite_block(host); usdhi6_sd_bh() 1457 io_wait = usdhi6_write_block(host); usdhi6_sd_bh() 1460 usdhi6_dma_check_error(host); usdhi6_sd_bh() 1463 usdhi6_write(host, USDHI6_SD_STOP, 0); usdhi6_sd_bh() 1464 if (host->io_error) { usdhi6_sd_bh() 1465 int ret = usdhi6_error_code(host); usdhi6_sd_bh() 1470 dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret); usdhi6_sd_bh() 1473 usdhi6_resp_cmd12(host); usdhi6_sd_bh() 1477 if (host->io_error) { usdhi6_sd_bh() 1478 mrq->data->error = usdhi6_error_code(host); usdhi6_sd_bh() 1479 dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, usdhi6_sd_bh() 1485 dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait); usdhi6_sd_bh() 1486 usdhi6_request_done(host); usdhi6_sd_bh() 1491 schedule_delayed_work(&host->timeout_work, host->timeout); usdhi6_sd_bh() 1493 if (!host->dma_active) usdhi6_sd_bh() 1494 usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ); usdhi6_sd_bh() 1501 if (host->wait != USDHI6_WAIT_FOR_STOP && usdhi6_sd_bh() 1502 host->mrq->stop && usdhi6_sd_bh() 1503 !host->mrq->stop->error && usdhi6_sd_bh() 1504 !usdhi6_stop_cmd(host)) { usdhi6_sd_bh() 1506 usdhi6_wait_for_resp(host); usdhi6_sd_bh() 1508 schedule_delayed_work(&host->timeout_work, usdhi6_sd_bh() 1509 host->timeout); usdhi6_sd_bh() 1517 dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n", usdhi6_sd_bh() 1519 usdhi6_sg_unmap(host, true); usdhi6_sd_bh() 1522 host->app_cmd = true; usdhi6_sd_bh() 1526 usdhi6_request_done(host); usdhi6_sd_bh() 1533 struct usdhi6_host *host = dev_id; usdhi6_sd() local 1536 status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask & usdhi6_sd() 1538 status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask; usdhi6_sd() 1540 usdhi6_only_cd(host); usdhi6_sd() 1542 dev_dbg(mmc_dev(host->mmc), usdhi6_sd() 1552 usdhi6_write(host, USDHI6_SD_INFO1, usdhi6_sd() 1560 usdhi6_write(host, USDHI6_SD_INFO2, usdhi6_sd() 1564 host->io_error = error; usdhi6_sd() 1565 host->irq_status = status; usdhi6_sd() 1569 if (host->wait != USDHI6_WAIT_FOR_CMD || 
usdhi6_sd() 1571 dev_warn(mmc_dev(host->mmc), usdhi6_sd() 1575 dev_dbg(mmc_dev(host->mmc), usdhi6_sd() 1585 struct usdhi6_host *host = dev_id; usdhi6_sdio() local 1586 u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask; usdhi6_sdio() 1588 dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status); usdhi6_sdio() 1593 usdhi6_write(host, USDHI6_SDIO_INFO1, ~status); usdhi6_sdio() 1595 mmc_signal_sdio_irq(host->mmc); usdhi6_sdio() 1602 struct usdhi6_host *host = dev_id; usdhi6_cd() local 1603 struct mmc_host *mmc = host->mmc; usdhi6_cd() 1607 status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask & usdhi6_cd() 1614 usdhi6_write(host, USDHI6_SD_INFO1, !status); usdhi6_cd() 1634 struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work); usdhi6_timeout_work() local 1635 struct mmc_request *mrq = host->mrq; usdhi6_timeout_work() 1638 dev_warn(mmc_dev(host->mmc), usdhi6_timeout_work() 1640 host->dma_active ? "DMA" : "PIO", usdhi6_timeout_work() 1641 host->wait, mrq ? mrq->cmd->opcode : -1, usdhi6_timeout_work() 1642 usdhi6_read(host, USDHI6_SD_INFO1), usdhi6_timeout_work() 1643 usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status); usdhi6_timeout_work() 1645 if (host->dma_active) { usdhi6_timeout_work() 1646 usdhi6_dma_kill(host); usdhi6_timeout_work() 1647 usdhi6_dma_stop_unmap(host); usdhi6_timeout_work() 1650 switch (host->wait) { usdhi6_timeout_work() 1652 dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait); usdhi6_timeout_work() 1655 usdhi6_error_code(host); usdhi6_timeout_work() 1660 usdhi6_error_code(host); usdhi6_timeout_work() 1668 dev_dbg(mmc_dev(host->mmc), usdhi6_timeout_work() 1670 data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx, usdhi6_timeout_work() 1671 host->offset, data->blocks, data->blksz, data->sg_len, usdhi6_timeout_work() 1672 sg_dma_len(host->sg), host->sg->offset); usdhi6_timeout_work() 1673 usdhi6_sg_unmap(host, true); usdhi6_timeout_work() 1679 usdhi6_error_code(host); usdhi6_timeout_work() 1684 usdhi6_request_done(host); usdhi6_timeout_work() 1699 struct usdhi6_host *host; usdhi6_probe() local 1724 host = mmc_priv(mmc); usdhi6_probe() 1725 host->mmc = mmc; usdhi6_probe() 1726 host->wait = USDHI6_WAIT_FOR_REQUEST; usdhi6_probe() 1727 host->timeout = msecs_to_jiffies(4000); usdhi6_probe() 1730 host->base = devm_ioremap_resource(dev, res); usdhi6_probe() 1731 if (IS_ERR(host->base)) { usdhi6_probe() 1732 ret = PTR_ERR(host->base); usdhi6_probe() 1736 host->clk = devm_clk_get(dev, NULL); usdhi6_probe() 1737 if (IS_ERR(host->clk)) usdhi6_probe() 1740 host->imclk = clk_get_rate(host->clk); usdhi6_probe() 1742 ret = clk_prepare_enable(host->clk); usdhi6_probe() 1746 version = usdhi6_read(host, USDHI6_VERSION); usdhi6_probe() 1752 dev_info(dev, "A USDHI6ROL0 SD host detected with %d ports\n", usdhi6_probe() 1753 usdhi6_read(host, USDHI6_SD_PORT_SEL) >> USDHI6_SD_PORT_SEL_PORTS_SHIFT); usdhi6_probe() 1755 usdhi6_mask_all(host); usdhi6_probe() 1759 dev_name(dev), host); usdhi6_probe() 1767 dev_name(dev), host); usdhi6_probe() 1772 dev_name(dev), host); usdhi6_probe() 1776 INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work); usdhi6_probe() 1778 usdhi6_dma_request(host, res->start); usdhi6_probe() 1798 mmc->f_max = host->imclk; usdhi6_probe() 1799 mmc->f_min = host->imclk / 512; usdhi6_probe() 1801 platform_set_drvdata(pdev, host); usdhi6_probe() 1810 clk_disable_unprepare(host->clk); usdhi6_probe() 1819 struct usdhi6_host *host = platform_get_drvdata(pdev); usdhi6_remove() local 1821 
mmc_remove_host(host->mmc); usdhi6_remove() 1823 usdhi6_mask_all(host); usdhi6_remove() 1824 cancel_delayed_work_sync(&host->timeout_work); usdhi6_remove() 1825 usdhi6_dma_release(host); usdhi6_remove() 1826 clk_disable_unprepare(host->clk); usdhi6_remove() 1827 mmc_free_host(host->mmc); usdhi6_remove() 1843 MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver");
|
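The usdhi6rol0 excerpt above is built around a PIO state machine: usdhi6_blk_read() drains one block from the 32-bit USDHI6_SD_BUF0 data port, reading whole words in a loop and then using a 16-bit access for any short tail. The sketch below shows only that read pattern; the bare __iomem argument, the simplified tail loop and the omission of the driver's scatterlist/bounce-page bookkeeping are assumptions made for illustration, not the driver's actual code.

#include <linux/io.h>
#include <linux/types.h>

/* One PIO block read: 'buf_reg' is the ioremapped data port (USDHI6_SD_BUF0
 * in the listing), 'buf' the destination taken from the mapped scatterlist. */
static void pio_read_block(void __iomem *buf_reg, u8 *buf, size_t blksz)
{
        u32 *p = (u32 *)buf;
        size_t rest = blksz % 4, i;

        /* whole 32-bit words straight from the data port */
        for (i = 0; i < blksz / 4; i++)
                *p++ = readl(buf_reg);

        /* a short tail (1-3 bytes) is drained with 16-bit accesses */
        for (i = 0; i < rest; i += 2) {
                u16 d = readw(buf_reg);
                u8 *q = (u8 *)p + i;

                q[0] = d & 0xff;
                if (rest - i > 1)
                        q[1] = d >> 8;
        }
}

The write path is symmetric, pushing words with writel()/writew() instead.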
H A D | omap_hsmmc.c | 2 * drivers/mmc/host/omap_hsmmc.c 36 #include <linux/mmc/host.h> 159 #define mmc_pdata(host) host->pdata 241 static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host); 245 struct omap_hsmmc_host *host = dev_get_drvdata(dev); omap_hsmmc_card_detect() local 247 return mmc_gpio_get_cd(host->mmc); omap_hsmmc_card_detect() 252 struct omap_hsmmc_host *host = dev_get_drvdata(dev); omap_hsmmc_get_cover_state() local 254 return mmc_gpio_get_cd(host->mmc); omap_hsmmc_get_cover_state() 261 struct omap_hsmmc_host *host = omap_hsmmc_set_power() local 269 if (!host->vcc) omap_hsmmc_set_power() 272 if (mmc_pdata(host)->before_set_reg) omap_hsmmc_set_power() 273 mmc_pdata(host)->before_set_reg(dev, power_on, vdd); omap_hsmmc_set_power() 275 if (host->pbias) { omap_hsmmc_set_power() 276 if (host->pbias_enabled == 1) { omap_hsmmc_set_power() 277 ret = regulator_disable(host->pbias); omap_hsmmc_set_power() 279 host->pbias_enabled = 0; omap_hsmmc_set_power() 281 regulator_set_voltage(host->pbias, VDD_3V0, VDD_3V0); omap_hsmmc_set_power() 298 if (host->vcc) omap_hsmmc_set_power() 299 ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); omap_hsmmc_set_power() 301 if (ret == 0 && host->vcc_aux) { omap_hsmmc_set_power() 302 ret = regulator_enable(host->vcc_aux); omap_hsmmc_set_power() 303 if (ret < 0 && host->vcc) omap_hsmmc_set_power() 304 ret = mmc_regulator_set_ocr(host->mmc, omap_hsmmc_set_power() 305 host->vcc, 0); omap_hsmmc_set_power() 309 if (host->vcc_aux) omap_hsmmc_set_power() 310 ret = regulator_disable(host->vcc_aux); omap_hsmmc_set_power() 311 if (host->vcc) { omap_hsmmc_set_power() 313 ret = mmc_regulator_set_ocr(host->mmc, omap_hsmmc_set_power() 314 host->vcc, 0); omap_hsmmc_set_power() 318 if (host->pbias) { omap_hsmmc_set_power() 320 ret = regulator_set_voltage(host->pbias, VDD_1V8, omap_hsmmc_set_power() 323 ret = regulator_set_voltage(host->pbias, VDD_3V0, omap_hsmmc_set_power() 328 if (host->pbias_enabled == 0) { omap_hsmmc_set_power() 329 ret = regulator_enable(host->pbias); omap_hsmmc_set_power() 331 host->pbias_enabled = 1; omap_hsmmc_set_power() 335 if (mmc_pdata(host)->after_set_reg) omap_hsmmc_set_power() 336 mmc_pdata(host)->after_set_reg(dev, power_on, vdd); omap_hsmmc_set_power() 342 static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) omap_hsmmc_reg_get() argument 347 reg = devm_regulator_get(host->dev, "vmmc"); omap_hsmmc_reg_get() 349 dev_err(host->dev, "unable to get vmmc regulator %ld\n", omap_hsmmc_reg_get() 353 host->vcc = reg; omap_hsmmc_reg_get() 355 if (!mmc_pdata(host)->ocr_mask) { omap_hsmmc_reg_get() 356 mmc_pdata(host)->ocr_mask = ocr_value; omap_hsmmc_reg_get() 358 if (!(mmc_pdata(host)->ocr_mask & ocr_value)) { omap_hsmmc_reg_get() 359 dev_err(host->dev, "ocrmask %x is not supported\n", omap_hsmmc_reg_get() 360 mmc_pdata(host)->ocr_mask); omap_hsmmc_reg_get() 361 mmc_pdata(host)->ocr_mask = 0; omap_hsmmc_reg_get() 366 mmc_pdata(host)->set_power = omap_hsmmc_set_power; omap_hsmmc_reg_get() 369 reg = devm_regulator_get_optional(host->dev, "vmmc_aux"); omap_hsmmc_reg_get() 370 host->vcc_aux = IS_ERR(reg) ? NULL : reg; omap_hsmmc_reg_get() 372 reg = devm_regulator_get_optional(host->dev, "pbias"); omap_hsmmc_reg_get() 373 host->pbias = IS_ERR(reg) ? 
NULL : reg; omap_hsmmc_reg_get() 376 if (mmc_pdata(host)->no_regulator_off_init) omap_hsmmc_reg_get() 382 if ((host->vcc && regulator_is_enabled(host->vcc) > 0) || omap_hsmmc_reg_get() 383 (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) { omap_hsmmc_reg_get() 384 int vdd = ffs(mmc_pdata(host)->ocr_mask) - 1; omap_hsmmc_reg_get() 386 mmc_pdata(host)->set_power(host->dev, 1, vdd); omap_hsmmc_reg_get() 387 mmc_pdata(host)->set_power(host->dev, 0, 0); omap_hsmmc_reg_get() 393 static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host) omap_hsmmc_reg_put() argument 395 mmc_pdata(host)->set_power = NULL; omap_hsmmc_reg_put() 405 static inline int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) omap_hsmmc_reg_get() argument 410 static inline void omap_hsmmc_reg_put(struct omap_hsmmc_host *host) omap_hsmmc_reg_put() argument 424 struct omap_hsmmc_host *host, omap_hsmmc_gpio_init() 434 host->get_cover_state = omap_hsmmc_get_cover_state; omap_hsmmc_gpio_init() 441 host->card_detect = omap_hsmmc_card_detect; omap_hsmmc_gpio_init() 456 static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host) omap_hsmmc_start_clock() argument 458 OMAP_HSMMC_WRITE(host->base, SYSCTL, omap_hsmmc_start_clock() 459 OMAP_HSMMC_READ(host->base, SYSCTL) | CEN); omap_hsmmc_start_clock() 465 static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host) omap_hsmmc_stop_clock() argument 467 OMAP_HSMMC_WRITE(host->base, SYSCTL, omap_hsmmc_stop_clock() 468 OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN); omap_hsmmc_stop_clock() 469 if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0) omap_hsmmc_stop_clock() 470 dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n"); omap_hsmmc_stop_clock() 473 static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host, omap_hsmmc_enable_irq() argument 479 if (host->use_dma) omap_hsmmc_enable_irq() 486 spin_lock_irqsave(&host->irq_lock, flags); omap_hsmmc_enable_irq() 487 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); omap_hsmmc_enable_irq() 488 OMAP_HSMMC_WRITE(host->base, ISE, irq_mask); omap_hsmmc_enable_irq() 491 if (host->flags & HSMMC_SDIO_IRQ_ENABLED) omap_hsmmc_enable_irq() 493 OMAP_HSMMC_WRITE(host->base, IE, irq_mask); omap_hsmmc_enable_irq() 494 spin_unlock_irqrestore(&host->irq_lock, flags); omap_hsmmc_enable_irq() 497 static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host) omap_hsmmc_disable_irq() argument 502 spin_lock_irqsave(&host->irq_lock, flags); omap_hsmmc_disable_irq() 504 if (host->flags & HSMMC_SDIO_IRQ_ENABLED) omap_hsmmc_disable_irq() 506 OMAP_HSMMC_WRITE(host->base, ISE, irq_mask); omap_hsmmc_disable_irq() 507 OMAP_HSMMC_WRITE(host->base, IE, irq_mask); omap_hsmmc_disable_irq() 508 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); omap_hsmmc_disable_irq() 509 spin_unlock_irqrestore(&host->irq_lock, flags); omap_hsmmc_disable_irq() 513 static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios) calc_divisor() argument 518 dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock); calc_divisor() 526 static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host) omap_hsmmc_set_clock() argument 528 struct mmc_ios *ios = &host->mmc->ios; omap_hsmmc_set_clock() 533 dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock); omap_hsmmc_set_clock() 535 omap_hsmmc_stop_clock(host); omap_hsmmc_set_clock() 537 regval = OMAP_HSMMC_READ(host->base, SYSCTL); omap_hsmmc_set_clock() 539 clkdiv = calc_divisor(host, ios); omap_hsmmc_set_clock() 541 OMAP_HSMMC_WRITE(host->base, SYSCTL, regval); omap_hsmmc_set_clock() 542 
OMAP_HSMMC_WRITE(host->base, SYSCTL, omap_hsmmc_set_clock() 543 OMAP_HSMMC_READ(host->base, SYSCTL) | ICE); omap_hsmmc_set_clock() 547 while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS omap_hsmmc_set_clock() 560 if ((mmc_pdata(host)->features & HSMMC_HAS_HSPE_SUPPORT) && omap_hsmmc_set_clock() 563 ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) { omap_hsmmc_set_clock() 564 regval = OMAP_HSMMC_READ(host->base, HCTL); omap_hsmmc_set_clock() 565 if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000) omap_hsmmc_set_clock() 570 OMAP_HSMMC_WRITE(host->base, HCTL, regval); omap_hsmmc_set_clock() 573 omap_hsmmc_start_clock(host); omap_hsmmc_set_clock() 576 static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host) omap_hsmmc_set_bus_width() argument 578 struct mmc_ios *ios = &host->mmc->ios; omap_hsmmc_set_bus_width() 581 con = OMAP_HSMMC_READ(host->base, CON); omap_hsmmc_set_bus_width() 589 OMAP_HSMMC_WRITE(host->base, CON, con | DW8); omap_hsmmc_set_bus_width() 592 OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); omap_hsmmc_set_bus_width() 593 OMAP_HSMMC_WRITE(host->base, HCTL, omap_hsmmc_set_bus_width() 594 OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT); omap_hsmmc_set_bus_width() 597 OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); omap_hsmmc_set_bus_width() 598 OMAP_HSMMC_WRITE(host->base, HCTL, omap_hsmmc_set_bus_width() 599 OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT); omap_hsmmc_set_bus_width() 604 static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host) omap_hsmmc_set_bus_mode() argument 606 struct mmc_ios *ios = &host->mmc->ios; omap_hsmmc_set_bus_mode() 609 con = OMAP_HSMMC_READ(host->base, CON); omap_hsmmc_set_bus_mode() 611 OMAP_HSMMC_WRITE(host->base, CON, con | OD); omap_hsmmc_set_bus_mode() 613 OMAP_HSMMC_WRITE(host->base, CON, con & ~OD); omap_hsmmc_set_bus_mode() 619 * Restore the MMC host context, if it was lost as result of a 622 static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) omap_hsmmc_context_restore() argument 624 struct mmc_ios *ios = &host->mmc->ios; omap_hsmmc_context_restore() 628 if (host->con == OMAP_HSMMC_READ(host->base, CON) && omap_hsmmc_context_restore() 629 host->hctl == OMAP_HSMMC_READ(host->base, HCTL) && omap_hsmmc_context_restore() 630 host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) && omap_hsmmc_context_restore() 631 host->capa == OMAP_HSMMC_READ(host->base, CAPA)) omap_hsmmc_context_restore() 634 host->context_loss++; omap_hsmmc_context_restore() 636 if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { omap_hsmmc_context_restore() 637 if (host->power_mode != MMC_POWER_OFF && omap_hsmmc_context_restore() 648 if (host->mmc->caps & MMC_CAP_SDIO_IRQ) omap_hsmmc_context_restore() 651 OMAP_HSMMC_WRITE(host->base, HCTL, omap_hsmmc_context_restore() 652 OMAP_HSMMC_READ(host->base, HCTL) | hctl); omap_hsmmc_context_restore() 654 OMAP_HSMMC_WRITE(host->base, CAPA, omap_hsmmc_context_restore() 655 OMAP_HSMMC_READ(host->base, CAPA) | capa); omap_hsmmc_context_restore() 657 OMAP_HSMMC_WRITE(host->base, HCTL, omap_hsmmc_context_restore() 658 OMAP_HSMMC_READ(host->base, HCTL) | SDBP); omap_hsmmc_context_restore() 661 while ((OMAP_HSMMC_READ(host->base, HCTL) & SDBP) != SDBP omap_hsmmc_context_restore() 665 OMAP_HSMMC_WRITE(host->base, ISE, 0); omap_hsmmc_context_restore() 666 OMAP_HSMMC_WRITE(host->base, IE, 0); omap_hsmmc_context_restore() 667 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); omap_hsmmc_context_restore() 670 if (host->power_mode == MMC_POWER_OFF) omap_hsmmc_context_restore() 673 
omap_hsmmc_set_bus_width(host); omap_hsmmc_context_restore() 675 omap_hsmmc_set_clock(host); omap_hsmmc_context_restore() 677 omap_hsmmc_set_bus_mode(host); omap_hsmmc_context_restore() 680 dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n", omap_hsmmc_context_restore() 681 host->context_loss); omap_hsmmc_context_restore() 686 * Save the MMC host context (store the number of power state changes so far). 688 static void omap_hsmmc_context_save(struct omap_hsmmc_host *host) omap_hsmmc_context_save() argument 690 host->con = OMAP_HSMMC_READ(host->base, CON); omap_hsmmc_context_save() 691 host->hctl = OMAP_HSMMC_READ(host->base, HCTL); omap_hsmmc_context_save() 692 host->sysctl = OMAP_HSMMC_READ(host->base, SYSCTL); omap_hsmmc_context_save() 693 host->capa = OMAP_HSMMC_READ(host->base, CAPA); omap_hsmmc_context_save() 698 static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) omap_hsmmc_context_restore() argument 703 static void omap_hsmmc_context_save(struct omap_hsmmc_host *host) omap_hsmmc_context_save() argument 713 static void send_init_stream(struct omap_hsmmc_host *host) send_init_stream() argument 718 if (host->protect_card) send_init_stream() 721 disable_irq(host->irq); send_init_stream() 723 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); send_init_stream() 724 OMAP_HSMMC_WRITE(host->base, CON, send_init_stream() 725 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM); send_init_stream() 726 OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD); send_init_stream() 730 reg = OMAP_HSMMC_READ(host->base, STAT) & CC_EN; send_init_stream() 732 OMAP_HSMMC_WRITE(host->base, CON, send_init_stream() 733 OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM); send_init_stream() 735 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); send_init_stream() 736 OMAP_HSMMC_READ(host->base, STAT); send_init_stream() 738 enable_irq(host->irq); send_init_stream() 742 int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host) omap_hsmmc_cover_is_closed() argument 746 if (host->get_cover_state) omap_hsmmc_cover_is_closed() 747 r = host->get_cover_state(host->dev); omap_hsmmc_cover_is_closed() 756 struct omap_hsmmc_host *host = mmc_priv(mmc); omap_hsmmc_show_cover_switch() local 759 omap_hsmmc_cover_is_closed(host) ? 
"closed" : "open"); omap_hsmmc_show_cover_switch() 769 struct omap_hsmmc_host *host = mmc_priv(mmc); omap_hsmmc_show_slot_name() local 771 return sprintf(buf, "%s\n", mmc_pdata(host)->name); omap_hsmmc_show_slot_name() 780 omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd, omap_hsmmc_start_command() argument 785 dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n", omap_hsmmc_start_command() 786 mmc_hostname(host->mmc), cmd->opcode, cmd->arg); omap_hsmmc_start_command() 787 host->cmd = cmd; omap_hsmmc_start_command() 789 omap_hsmmc_enable_irq(host, cmd); omap_hsmmc_start_command() 791 host->response_busy = 0; omap_hsmmc_start_command() 797 host->response_busy = 1; omap_hsmmc_start_command() 807 if (cmd == host->mrq->stop) omap_hsmmc_start_command() 812 if ((host->flags & AUTO_CMD23) && mmc_op_multi(cmd->opcode) && omap_hsmmc_start_command() 813 host->mrq->sbc) { omap_hsmmc_start_command() 815 OMAP_HSMMC_WRITE(host->base, SDMASA, host->mrq->sbc->arg); omap_hsmmc_start_command() 825 if (host->use_dma) omap_hsmmc_start_command() 828 host->req_in_progress = 1; omap_hsmmc_start_command() 830 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg); omap_hsmmc_start_command() 831 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg); omap_hsmmc_start_command() 835 omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data) omap_hsmmc_get_dma_dir() argument 843 static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host, omap_hsmmc_get_dma_chan() argument 846 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan; omap_hsmmc_get_dma_chan() 849 static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq) omap_hsmmc_request_done() argument 854 spin_lock_irqsave(&host->irq_lock, flags); omap_hsmmc_request_done() 855 host->req_in_progress = 0; omap_hsmmc_request_done() 856 dma_ch = host->dma_ch; omap_hsmmc_request_done() 857 spin_unlock_irqrestore(&host->irq_lock, flags); omap_hsmmc_request_done() 859 omap_hsmmc_disable_irq(host); omap_hsmmc_request_done() 861 if (mrq->data && host->use_dma && dma_ch != -1) omap_hsmmc_request_done() 863 host->mrq = NULL; omap_hsmmc_request_done() 864 mmc_request_done(host->mmc, mrq); omap_hsmmc_request_done() 865 pm_runtime_mark_last_busy(host->dev); omap_hsmmc_request_done() 866 pm_runtime_put_autosuspend(host->dev); omap_hsmmc_request_done() 873 omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data) omap_hsmmc_xfer_done() argument 876 struct mmc_request *mrq = host->mrq; omap_hsmmc_xfer_done() 879 if (host->cmd && host->cmd->opcode == 6 && omap_hsmmc_xfer_done() 880 host->response_busy) { omap_hsmmc_xfer_done() 881 host->response_busy = 0; omap_hsmmc_xfer_done() 885 omap_hsmmc_request_done(host, mrq); omap_hsmmc_xfer_done() 889 host->data = NULL; omap_hsmmc_xfer_done() 896 if (data->stop && (data->error || !host->mrq->sbc)) omap_hsmmc_xfer_done() 897 omap_hsmmc_start_command(host, data->stop, NULL); omap_hsmmc_xfer_done() 899 omap_hsmmc_request_done(host, data->mrq); omap_hsmmc_xfer_done() 906 omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd) omap_hsmmc_cmd_done() argument 908 if (host->mrq->sbc && (host->cmd == host->mrq->sbc) && omap_hsmmc_cmd_done() 909 !host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) { omap_hsmmc_cmd_done() 910 host->cmd = NULL; omap_hsmmc_cmd_done() 911 omap_hsmmc_start_dma_transfer(host); omap_hsmmc_cmd_done() 912 omap_hsmmc_start_command(host, host->mrq->cmd, omap_hsmmc_cmd_done() 913 host->mrq->data); 
omap_hsmmc_cmd_done() 917 host->cmd = NULL; omap_hsmmc_cmd_done() 922 cmd->resp[3] = OMAP_HSMMC_READ(host->base, RSP10); omap_hsmmc_cmd_done() 923 cmd->resp[2] = OMAP_HSMMC_READ(host->base, RSP32); omap_hsmmc_cmd_done() 924 cmd->resp[1] = OMAP_HSMMC_READ(host->base, RSP54); omap_hsmmc_cmd_done() 925 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP76); omap_hsmmc_cmd_done() 928 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10); omap_hsmmc_cmd_done() 931 if ((host->data == NULL && !host->response_busy) || cmd->error) omap_hsmmc_cmd_done() 932 omap_hsmmc_request_done(host, host->mrq); omap_hsmmc_cmd_done() 938 static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) omap_hsmmc_dma_cleanup() argument 943 host->data->error = errno; omap_hsmmc_dma_cleanup() 945 spin_lock_irqsave(&host->irq_lock, flags); omap_hsmmc_dma_cleanup() 946 dma_ch = host->dma_ch; omap_hsmmc_dma_cleanup() 947 host->dma_ch = -1; omap_hsmmc_dma_cleanup() 948 spin_unlock_irqrestore(&host->irq_lock, flags); omap_hsmmc_dma_cleanup() 950 if (host->use_dma && dma_ch != -1) { omap_hsmmc_dma_cleanup() 951 struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data); omap_hsmmc_dma_cleanup() 955 host->data->sg, host->data->sg_len, omap_hsmmc_dma_cleanup() 956 omap_hsmmc_get_dma_dir(host, host->data)); omap_hsmmc_dma_cleanup() 958 host->data->host_cookie = 0; omap_hsmmc_dma_cleanup() 960 host->data = NULL; omap_hsmmc_dma_cleanup() 967 static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status) omap_hsmmc_dbg_report_irq() argument 989 dev_vdbg(mmc_dev(host->mmc), "%s\n", res); omap_hsmmc_dbg_report_irq() 992 static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, omap_hsmmc_dbg_report_irq() argument 1005 static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, omap_hsmmc_reset_controller_fsm() argument 1011 OMAP_HSMMC_WRITE(host->base, SYSCTL, omap_hsmmc_reset_controller_fsm() 1012 OMAP_HSMMC_READ(host->base, SYSCTL) | bit); omap_hsmmc_reset_controller_fsm() 1018 if (mmc_pdata(host)->features & HSMMC_HAS_UPDATED_RESET) { omap_hsmmc_reset_controller_fsm() 1019 while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit)) omap_hsmmc_reset_controller_fsm() 1025 while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) && omap_hsmmc_reset_controller_fsm() 1029 if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit) omap_hsmmc_reset_controller_fsm() 1030 dev_err(mmc_dev(host->mmc), omap_hsmmc_reset_controller_fsm() 1035 static void hsmmc_command_incomplete(struct omap_hsmmc_host *host, hsmmc_command_incomplete() argument 1039 omap_hsmmc_reset_controller_fsm(host, SRC); hsmmc_command_incomplete() 1040 if (host->cmd) hsmmc_command_incomplete() 1041 host->cmd->error = err; hsmmc_command_incomplete() 1044 if (host->data) { hsmmc_command_incomplete() 1045 omap_hsmmc_reset_controller_fsm(host, SRD); hsmmc_command_incomplete() 1046 omap_hsmmc_dma_cleanup(host, err); hsmmc_command_incomplete() 1047 } else if (host->mrq && host->mrq->cmd) hsmmc_command_incomplete() 1048 host->mrq->cmd->error = err; hsmmc_command_incomplete() 1051 static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) omap_hsmmc_do_irq() argument 1057 data = host->data; omap_hsmmc_do_irq() 1058 dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); omap_hsmmc_do_irq() 1061 omap_hsmmc_dbg_report_irq(host, status); omap_hsmmc_do_irq() 1065 if (host->data || host->response_busy) { omap_hsmmc_do_irq() 1067 host->response_busy = 0; omap_hsmmc_do_irq() 1070 hsmmc_command_incomplete(host, -ETIMEDOUT, 
end_cmd); omap_hsmmc_do_irq() 1072 hsmmc_command_incomplete(host, -EILSEQ, end_cmd); omap_hsmmc_do_irq() 1076 ac12 = OMAP_HSMMC_READ(host->base, AC12); omap_hsmmc_do_irq() 1077 if (!(ac12 & ACNE) && host->mrq->sbc) { omap_hsmmc_do_irq() 1083 host->mrq->sbc->error = error; omap_hsmmc_do_irq() 1084 hsmmc_command_incomplete(host, error, end_cmd); omap_hsmmc_do_irq() 1086 dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12); omap_hsmmc_do_irq() 1090 OMAP_HSMMC_WRITE(host->base, STAT, status); omap_hsmmc_do_irq() 1091 if (end_cmd || ((status & CC_EN) && host->cmd)) omap_hsmmc_do_irq() 1092 omap_hsmmc_cmd_done(host, host->cmd); omap_hsmmc_do_irq() 1093 if ((end_trans || (status & TC_EN)) && host->mrq) omap_hsmmc_do_irq() 1094 omap_hsmmc_xfer_done(host, data); omap_hsmmc_do_irq() 1102 struct omap_hsmmc_host *host = dev_id; omap_hsmmc_irq() local 1105 status = OMAP_HSMMC_READ(host->base, STAT); omap_hsmmc_irq() 1107 if (host->req_in_progress) omap_hsmmc_irq() 1108 omap_hsmmc_do_irq(host, status); omap_hsmmc_irq() 1111 mmc_signal_sdio_irq(host->mmc); omap_hsmmc_irq() 1114 status = OMAP_HSMMC_READ(host->base, STAT); omap_hsmmc_irq() 1122 struct omap_hsmmc_host *host = dev_id; omap_hsmmc_wake_irq() local 1125 spin_lock(&host->irq_lock); omap_hsmmc_wake_irq() 1126 if (host->flags & HSMMC_WAKE_IRQ_ENABLED) { omap_hsmmc_wake_irq() 1127 disable_irq_nosync(host->wake_irq); omap_hsmmc_wake_irq() 1128 host->flags &= ~HSMMC_WAKE_IRQ_ENABLED; omap_hsmmc_wake_irq() 1130 spin_unlock(&host->irq_lock); omap_hsmmc_wake_irq() 1131 pm_request_resume(host->dev); /* no use counter */ omap_hsmmc_wake_irq() 1136 static void set_sd_bus_power(struct omap_hsmmc_host *host) set_sd_bus_power() argument 1140 OMAP_HSMMC_WRITE(host->base, HCTL, set_sd_bus_power() 1141 OMAP_HSMMC_READ(host->base, HCTL) | SDBP); set_sd_bus_power() 1143 if (OMAP_HSMMC_READ(host->base, HCTL) & SDBP) set_sd_bus_power() 1156 static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd) omap_hsmmc_switch_opcond() argument 1162 pm_runtime_put_sync(host->dev); omap_hsmmc_switch_opcond() 1163 if (host->dbclk) omap_hsmmc_switch_opcond() 1164 clk_disable_unprepare(host->dbclk); omap_hsmmc_switch_opcond() 1167 ret = mmc_pdata(host)->set_power(host->dev, 0, 0); omap_hsmmc_switch_opcond() 1171 ret = mmc_pdata(host)->set_power(host->dev, 1, vdd); omap_hsmmc_switch_opcond() 1172 pm_runtime_get_sync(host->dev); omap_hsmmc_switch_opcond() 1173 if (host->dbclk) omap_hsmmc_switch_opcond() 1174 clk_prepare_enable(host->dbclk); omap_hsmmc_switch_opcond() 1179 OMAP_HSMMC_WRITE(host->base, HCTL, omap_hsmmc_switch_opcond() 1180 OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR); omap_hsmmc_switch_opcond() 1181 reg_val = OMAP_HSMMC_READ(host->base, HCTL); omap_hsmmc_switch_opcond() 1203 OMAP_HSMMC_WRITE(host->base, HCTL, reg_val); omap_hsmmc_switch_opcond() 1204 set_sd_bus_power(host); omap_hsmmc_switch_opcond() 1208 dev_err(mmc_dev(host->mmc), "Unable to switch operating voltage\n"); omap_hsmmc_switch_opcond() 1213 static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host) omap_hsmmc_protect_card() argument 1215 if (!host->get_cover_state) omap_hsmmc_protect_card() 1218 host->reqs_blocked = 0; omap_hsmmc_protect_card() 1219 if (host->get_cover_state(host->dev)) { omap_hsmmc_protect_card() 1220 if (host->protect_card) { omap_hsmmc_protect_card() 1221 dev_info(host->dev, "%s: cover is closed, " omap_hsmmc_protect_card() 1223 mmc_hostname(host->mmc)); omap_hsmmc_protect_card() 1224 host->protect_card = 0; omap_hsmmc_protect_card() 1227 if 
(!host->protect_card) { omap_hsmmc_protect_card() 1228 dev_info(host->dev, "%s: cover is open, " omap_hsmmc_protect_card() 1230 mmc_hostname(host->mmc)); omap_hsmmc_protect_card() 1231 host->protect_card = 1; omap_hsmmc_protect_card() 1241 struct omap_hsmmc_host *host = dev_id; omap_hsmmc_cover_irq() local 1243 sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch"); omap_hsmmc_cover_irq() 1245 omap_hsmmc_protect_card(host); omap_hsmmc_cover_irq() 1246 mmc_detect_change(host->mmc, (HZ * 200) / 1000); omap_hsmmc_cover_irq() 1252 struct omap_hsmmc_host *host = param; omap_hsmmc_dma_callback() local 1257 spin_lock_irq(&host->irq_lock); omap_hsmmc_dma_callback() 1258 if (host->dma_ch < 0) { omap_hsmmc_dma_callback() 1259 spin_unlock_irq(&host->irq_lock); omap_hsmmc_dma_callback() 1263 data = host->mrq->data; omap_hsmmc_dma_callback() 1264 chan = omap_hsmmc_get_dma_chan(host, data); omap_hsmmc_dma_callback() 1268 omap_hsmmc_get_dma_dir(host, data)); omap_hsmmc_dma_callback() 1270 req_in_progress = host->req_in_progress; omap_hsmmc_dma_callback() 1271 host->dma_ch = -1; omap_hsmmc_dma_callback() 1272 spin_unlock_irq(&host->irq_lock); omap_hsmmc_dma_callback() 1276 struct mmc_request *mrq = host->mrq; omap_hsmmc_dma_callback() 1278 host->mrq = NULL; omap_hsmmc_dma_callback() 1279 mmc_request_done(host->mmc, mrq); omap_hsmmc_dma_callback() 1280 pm_runtime_mark_last_busy(host->dev); omap_hsmmc_dma_callback() 1281 pm_runtime_put_autosuspend(host->dev); omap_hsmmc_dma_callback() 1285 static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, omap_hsmmc_pre_dma_transfer() argument 1293 data->host_cookie != host->next_data.cookie) { omap_hsmmc_pre_dma_transfer() 1294 dev_warn(host->dev, "[%s] invalid cookie: data->host_cookie %d" omap_hsmmc_pre_dma_transfer() 1295 " host->next_data.cookie %d\n", omap_hsmmc_pre_dma_transfer() 1296 __func__, data->host_cookie, host->next_data.cookie); omap_hsmmc_pre_dma_transfer() 1301 if (next || data->host_cookie != host->next_data.cookie) { omap_hsmmc_pre_dma_transfer() 1303 omap_hsmmc_get_dma_dir(host, data)); omap_hsmmc_pre_dma_transfer() 1306 dma_len = host->next_data.dma_len; omap_hsmmc_pre_dma_transfer() 1307 host->next_data.dma_len = 0; omap_hsmmc_pre_dma_transfer() 1318 host->dma_len = dma_len; omap_hsmmc_pre_dma_transfer() 1326 static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host, omap_hsmmc_setup_dma_transfer() argument 1349 BUG_ON(host->dma_ch != -1); omap_hsmmc_setup_dma_transfer() 1351 chan = omap_hsmmc_get_dma_chan(host, data); omap_hsmmc_setup_dma_transfer() 1353 cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA; omap_hsmmc_setup_dma_transfer() 1354 cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA; omap_hsmmc_setup_dma_transfer() 1364 ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan); omap_hsmmc_setup_dma_transfer() 1372 dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n"); omap_hsmmc_setup_dma_transfer() 1378 tx->callback_param = host; omap_hsmmc_setup_dma_transfer() 1383 host->dma_ch = 1; omap_hsmmc_setup_dma_transfer() 1388 static void set_data_timeout(struct omap_hsmmc_host *host, set_data_timeout() argument 1395 reg = OMAP_HSMMC_READ(host->base, SYSCTL); set_data_timeout() 1400 cycle_ns = 1000000000 / (host->clk_rate / clkd); set_data_timeout() 1422 OMAP_HSMMC_WRITE(host->base, SYSCTL, reg); set_data_timeout() 1425 static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host) omap_hsmmc_start_dma_transfer() argument 1427 struct mmc_request *req = host->mrq; omap_hsmmc_start_dma_transfer() 1432 
OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz) omap_hsmmc_start_dma_transfer() 1434 set_data_timeout(host, req->data->timeout_ns, omap_hsmmc_start_dma_transfer() 1436 chan = omap_hsmmc_get_dma_chan(host, req->data); omap_hsmmc_start_dma_transfer() 1444 omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req) omap_hsmmc_prepare_data() argument 1447 host->data = req->data; omap_hsmmc_prepare_data() 1450 OMAP_HSMMC_WRITE(host->base, BLK, 0); omap_hsmmc_prepare_data() 1456 set_data_timeout(host, 100000000U, 0); omap_hsmmc_prepare_data() 1460 if (host->use_dma) { omap_hsmmc_prepare_data() 1461 ret = omap_hsmmc_setup_dma_transfer(host, req); omap_hsmmc_prepare_data() 1463 dev_err(mmc_dev(host->mmc), "MMC start dma failure\n"); omap_hsmmc_prepare_data() 1473 struct omap_hsmmc_host *host = mmc_priv(mmc); omap_hsmmc_post_req() local 1476 if (host->use_dma && data->host_cookie) { omap_hsmmc_post_req() 1477 struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data); omap_hsmmc_post_req() 1480 omap_hsmmc_get_dma_dir(host, data)); omap_hsmmc_post_req() 1488 struct omap_hsmmc_host *host = mmc_priv(mmc); omap_hsmmc_pre_req() local 1495 if (host->use_dma) { omap_hsmmc_pre_req() 1496 struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data); omap_hsmmc_pre_req() 1498 if (omap_hsmmc_pre_dma_transfer(host, mrq->data, omap_hsmmc_pre_req() 1499 &host->next_data, c)) omap_hsmmc_pre_req() 1509 struct omap_hsmmc_host *host = mmc_priv(mmc); omap_hsmmc_request() local 1512 BUG_ON(host->req_in_progress); omap_hsmmc_request() 1513 BUG_ON(host->dma_ch != -1); omap_hsmmc_request() 1514 pm_runtime_get_sync(host->dev); omap_hsmmc_request() 1515 if (host->protect_card) { omap_hsmmc_request() 1516 if (host->reqs_blocked < 3) { omap_hsmmc_request() 1522 omap_hsmmc_reset_controller_fsm(host, SRD); omap_hsmmc_request() 1523 omap_hsmmc_reset_controller_fsm(host, SRC); omap_hsmmc_request() 1524 host->reqs_blocked += 1; omap_hsmmc_request() 1531 pm_runtime_mark_last_busy(host->dev); omap_hsmmc_request() 1532 pm_runtime_put_autosuspend(host->dev); omap_hsmmc_request() 1534 } else if (host->reqs_blocked) omap_hsmmc_request() 1535 host->reqs_blocked = 0; omap_hsmmc_request() 1536 WARN_ON(host->mrq != NULL); omap_hsmmc_request() 1537 host->mrq = req; omap_hsmmc_request() 1538 host->clk_rate = clk_get_rate(host->fclk); omap_hsmmc_request() 1539 err = omap_hsmmc_prepare_data(host, req); omap_hsmmc_request() 1544 host->mrq = NULL; omap_hsmmc_request() 1546 pm_runtime_mark_last_busy(host->dev); omap_hsmmc_request() 1547 pm_runtime_put_autosuspend(host->dev); omap_hsmmc_request() 1550 if (req->sbc && !(host->flags & AUTO_CMD23)) { omap_hsmmc_request() 1551 omap_hsmmc_start_command(host, req->sbc, NULL); omap_hsmmc_request() 1555 omap_hsmmc_start_dma_transfer(host); omap_hsmmc_request() 1556 omap_hsmmc_start_command(host, req->cmd, req->data); omap_hsmmc_request() 1562 struct omap_hsmmc_host *host = mmc_priv(mmc); omap_hsmmc_set_ios() local 1565 pm_runtime_get_sync(host->dev); omap_hsmmc_set_ios() 1567 if (ios->power_mode != host->power_mode) { omap_hsmmc_set_ios() 1570 mmc_pdata(host)->set_power(host->dev, 0, 0); omap_hsmmc_set_ios() 1573 mmc_pdata(host)->set_power(host->dev, 1, ios->vdd); omap_hsmmc_set_ios() 1579 host->power_mode = ios->power_mode; omap_hsmmc_set_ios() 1584 omap_hsmmc_set_bus_width(host); omap_hsmmc_set_ios() 1586 if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { omap_hsmmc_set_ios() 1590 if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) && omap_hsmmc_set_ios() 1598 if 
(omap_hsmmc_switch_opcond(host, ios->vdd) != 0) omap_hsmmc_set_ios() 1599 dev_dbg(mmc_dev(host->mmc), omap_hsmmc_set_ios() 1604 omap_hsmmc_set_clock(host); omap_hsmmc_set_ios() 1607 send_init_stream(host); omap_hsmmc_set_ios() 1609 omap_hsmmc_set_bus_mode(host); omap_hsmmc_set_ios() 1611 pm_runtime_put_autosuspend(host->dev); omap_hsmmc_set_ios() 1616 struct omap_hsmmc_host *host = mmc_priv(mmc); omap_hsmmc_get_cd() local 1618 if (!host->card_detect) omap_hsmmc_get_cd() 1620 return host->card_detect(host->dev); omap_hsmmc_get_cd() 1625 struct omap_hsmmc_host *host = mmc_priv(mmc); omap_hsmmc_init_card() local 1627 if (mmc_pdata(host)->init_card) omap_hsmmc_init_card() 1628 mmc_pdata(host)->init_card(card); omap_hsmmc_init_card() 1633 struct omap_hsmmc_host *host = mmc_priv(mmc); omap_hsmmc_enable_sdio_irq() local 1637 spin_lock_irqsave(&host->irq_lock, flags); omap_hsmmc_enable_sdio_irq() 1639 con = OMAP_HSMMC_READ(host->base, CON); omap_hsmmc_enable_sdio_irq() 1640 irq_mask = OMAP_HSMMC_READ(host->base, ISE); omap_hsmmc_enable_sdio_irq() 1642 host->flags |= HSMMC_SDIO_IRQ_ENABLED; omap_hsmmc_enable_sdio_irq() 1646 host->flags &= ~HSMMC_SDIO_IRQ_ENABLED; omap_hsmmc_enable_sdio_irq() 1650 OMAP_HSMMC_WRITE(host->base, CON, con); omap_hsmmc_enable_sdio_irq() 1651 OMAP_HSMMC_WRITE(host->base, IE, irq_mask); omap_hsmmc_enable_sdio_irq() 1657 if (!host->req_in_progress || !enable) omap_hsmmc_enable_sdio_irq() 1658 OMAP_HSMMC_WRITE(host->base, ISE, irq_mask); omap_hsmmc_enable_sdio_irq() 1661 OMAP_HSMMC_READ(host->base, IE); omap_hsmmc_enable_sdio_irq() 1663 spin_unlock_irqrestore(&host->irq_lock, flags); omap_hsmmc_enable_sdio_irq() 1666 static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host) omap_hsmmc_configure_wake_irq() argument 1668 struct mmc_host *mmc = host->mmc; omap_hsmmc_configure_wake_irq() 1677 if (!host->dev->of_node || !host->wake_irq) omap_hsmmc_configure_wake_irq() 1681 irq_set_status_flags(host->wake_irq, IRQ_NOAUTOEN); omap_hsmmc_configure_wake_irq() 1682 ret = devm_request_irq(host->dev, host->wake_irq, omap_hsmmc_wake_irq, omap_hsmmc_configure_wake_irq() 1684 mmc_hostname(mmc), host); omap_hsmmc_configure_wake_irq() 1686 dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n"); omap_hsmmc_configure_wake_irq() 1694 if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) { omap_hsmmc_configure_wake_irq() 1695 struct pinctrl *p = devm_pinctrl_get(host->dev); omap_hsmmc_configure_wake_irq() 1701 dev_info(host->dev, "missing default pinctrl state\n"); omap_hsmmc_configure_wake_irq() 1708 dev_info(host->dev, "missing idle pinctrl state\n"); omap_hsmmc_configure_wake_irq() 1716 OMAP_HSMMC_WRITE(host->base, HCTL, omap_hsmmc_configure_wake_irq() 1717 OMAP_HSMMC_READ(host->base, HCTL) | IWE); omap_hsmmc_configure_wake_irq() 1721 devm_free_irq(host->dev, host->wake_irq, host); omap_hsmmc_configure_wake_irq() 1723 dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n"); omap_hsmmc_configure_wake_irq() 1724 host->wake_irq = 0; omap_hsmmc_configure_wake_irq() 1728 static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host) omap_hsmmc_conf_bus_power() argument 1733 if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { omap_hsmmc_conf_bus_power() 1741 value = OMAP_HSMMC_READ(host->base, HCTL) & ~SDVS_MASK; omap_hsmmc_conf_bus_power() 1742 OMAP_HSMMC_WRITE(host->base, HCTL, value | hctl); omap_hsmmc_conf_bus_power() 1744 value = OMAP_HSMMC_READ(host->base, CAPA); omap_hsmmc_conf_bus_power() 1745 OMAP_HSMMC_WRITE(host->base, CAPA, 
value | capa); omap_hsmmc_conf_bus_power() 1748 set_sd_bus_power(host); omap_hsmmc_conf_bus_power() 1777 struct omap_hsmmc_host *host = mmc_priv(mmc); omap_hsmmc_regs_show() local 1785 (host->flags & HSMMC_SDIO_IRQ_ENABLED) ? "enabled" omap_hsmmc_regs_show() 1788 seq_printf(s, "ctx_loss:\t%d\n", host->context_loss); omap_hsmmc_regs_show() 1790 pm_runtime_get_sync(host->dev); omap_hsmmc_regs_show() 1793 OMAP_HSMMC_READ(host->base, CON)); omap_hsmmc_regs_show() 1795 OMAP_HSMMC_READ(host->base, PSTATE)); omap_hsmmc_regs_show() 1797 OMAP_HSMMC_READ(host->base, HCTL)); omap_hsmmc_regs_show() 1799 OMAP_HSMMC_READ(host->base, SYSCTL)); omap_hsmmc_regs_show() 1801 OMAP_HSMMC_READ(host->base, IE)); omap_hsmmc_regs_show() 1803 OMAP_HSMMC_READ(host->base, ISE)); omap_hsmmc_regs_show() 1805 OMAP_HSMMC_READ(host->base, CAPA)); omap_hsmmc_regs_show() 1807 pm_runtime_mark_last_busy(host->dev); omap_hsmmc_regs_show() 1808 pm_runtime_put_autosuspend(host->dev); omap_hsmmc_regs_show() 1918 struct omap_hsmmc_host *host = NULL; omap_hsmmc_probe() local 1965 host = mmc_priv(mmc); omap_hsmmc_probe() 1966 host->mmc = mmc; omap_hsmmc_probe() 1967 host->pdata = pdata; omap_hsmmc_probe() 1968 host->dev = &pdev->dev; omap_hsmmc_probe() 1969 host->use_dma = 1; omap_hsmmc_probe() 1970 host->dma_ch = -1; omap_hsmmc_probe() 1971 host->irq = irq; omap_hsmmc_probe() 1972 host->mapbase = res->start + pdata->reg_offset; omap_hsmmc_probe() 1973 host->base = base + pdata->reg_offset; omap_hsmmc_probe() 1974 host->power_mode = MMC_POWER_OFF; omap_hsmmc_probe() 1975 host->next_data.cookie = 1; omap_hsmmc_probe() 1976 host->pbias_enabled = 0; omap_hsmmc_probe() 1978 ret = omap_hsmmc_gpio_init(mmc, host, pdata); omap_hsmmc_probe() 1982 platform_set_drvdata(pdev, host); omap_hsmmc_probe() 1985 host->wake_irq = irq_of_parse_and_map(pdev->dev.of_node, 1); omap_hsmmc_probe() 1996 spin_lock_init(&host->irq_lock); omap_hsmmc_probe() 1998 host->fclk = devm_clk_get(&pdev->dev, "fck"); omap_hsmmc_probe() 1999 if (IS_ERR(host->fclk)) { omap_hsmmc_probe() 2000 ret = PTR_ERR(host->fclk); omap_hsmmc_probe() 2001 host->fclk = NULL; omap_hsmmc_probe() 2005 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) { omap_hsmmc_probe() 2010 pm_runtime_enable(host->dev); omap_hsmmc_probe() 2011 pm_runtime_get_sync(host->dev); omap_hsmmc_probe() 2012 pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY); omap_hsmmc_probe() 2013 pm_runtime_use_autosuspend(host->dev); omap_hsmmc_probe() 2015 omap_hsmmc_context_save(host); omap_hsmmc_probe() 2017 host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck"); omap_hsmmc_probe() 2021 if (IS_ERR(host->dbclk)) { omap_hsmmc_probe() 2022 host->dbclk = NULL; omap_hsmmc_probe() 2023 } else if (clk_prepare_enable(host->dbclk) != 0) { omap_hsmmc_probe() 2024 dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n"); omap_hsmmc_probe() 2025 host->dbclk = NULL; omap_hsmmc_probe() 2040 mmc->caps |= mmc_pdata(host)->caps; omap_hsmmc_probe() 2044 if (mmc_pdata(host)->nonremovable) omap_hsmmc_probe() 2047 mmc->pm_caps |= mmc_pdata(host)->pm_caps; omap_hsmmc_probe() 2049 omap_hsmmc_conf_bus_power(host); omap_hsmmc_probe() 2054 dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n"); omap_hsmmc_probe() 2062 dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n"); omap_hsmmc_probe() 2072 host->rx_chan = omap_hsmmc_probe() 2076 if (!host->rx_chan) { omap_hsmmc_probe() 2077 dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req); omap_hsmmc_probe() 2082 
host->tx_chan = omap_hsmmc_probe() 2086 if (!host->tx_chan) { omap_hsmmc_probe() 2087 dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req); omap_hsmmc_probe() 2093 ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0, omap_hsmmc_probe() 2094 mmc_hostname(mmc), host); omap_hsmmc_probe() 2096 dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n"); omap_hsmmc_probe() 2100 if (omap_hsmmc_have_reg() && !mmc_pdata(host)->set_power) { omap_hsmmc_probe() 2101 ret = omap_hsmmc_reg_get(host); omap_hsmmc_probe() 2104 host->use_reg = 1; omap_hsmmc_probe() 2107 mmc->ocr_avail = mmc_pdata(host)->ocr_mask; omap_hsmmc_probe() 2109 omap_hsmmc_disable_irq(host); omap_hsmmc_probe() 2119 ret = omap_hsmmc_configure_wake_irq(host); omap_hsmmc_probe() 2123 omap_hsmmc_protect_card(host); omap_hsmmc_probe() 2127 if (mmc_pdata(host)->name != NULL) { omap_hsmmc_probe() 2132 if (host->get_cover_state) { omap_hsmmc_probe() 2140 pm_runtime_mark_last_busy(host->dev); omap_hsmmc_probe() 2141 pm_runtime_put_autosuspend(host->dev); omap_hsmmc_probe() 2147 if (host->use_reg) omap_hsmmc_probe() 2148 omap_hsmmc_reg_put(host); omap_hsmmc_probe() 2150 if (host->tx_chan) omap_hsmmc_probe() 2151 dma_release_channel(host->tx_chan); omap_hsmmc_probe() 2152 if (host->rx_chan) omap_hsmmc_probe() 2153 dma_release_channel(host->rx_chan); omap_hsmmc_probe() 2154 pm_runtime_put_sync(host->dev); omap_hsmmc_probe() 2155 pm_runtime_disable(host->dev); omap_hsmmc_probe() 2156 if (host->dbclk) omap_hsmmc_probe() 2157 clk_disable_unprepare(host->dbclk); omap_hsmmc_probe() 2167 struct omap_hsmmc_host *host = platform_get_drvdata(pdev); omap_hsmmc_remove() local 2169 pm_runtime_get_sync(host->dev); omap_hsmmc_remove() 2170 mmc_remove_host(host->mmc); omap_hsmmc_remove() 2171 if (host->use_reg) omap_hsmmc_remove() 2172 omap_hsmmc_reg_put(host); omap_hsmmc_remove() 2174 if (host->tx_chan) omap_hsmmc_remove() 2175 dma_release_channel(host->tx_chan); omap_hsmmc_remove() 2176 if (host->rx_chan) omap_hsmmc_remove() 2177 dma_release_channel(host->rx_chan); omap_hsmmc_remove() 2179 pm_runtime_put_sync(host->dev); omap_hsmmc_remove() 2180 pm_runtime_disable(host->dev); omap_hsmmc_remove() 2181 if (host->dbclk) omap_hsmmc_remove() 2182 clk_disable_unprepare(host->dbclk); omap_hsmmc_remove() 2184 mmc_free_host(host->mmc); omap_hsmmc_remove() 2192 struct omap_hsmmc_host *host = dev_get_drvdata(dev); omap_hsmmc_suspend() local 2194 if (!host) omap_hsmmc_suspend() 2197 pm_runtime_get_sync(host->dev); omap_hsmmc_suspend() 2199 if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) { omap_hsmmc_suspend() 2200 OMAP_HSMMC_WRITE(host->base, ISE, 0); omap_hsmmc_suspend() 2201 OMAP_HSMMC_WRITE(host->base, IE, 0); omap_hsmmc_suspend() 2202 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); omap_hsmmc_suspend() 2203 OMAP_HSMMC_WRITE(host->base, HCTL, omap_hsmmc_suspend() 2204 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); omap_hsmmc_suspend() 2208 if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && omap_hsmmc_suspend() 2209 !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ)) omap_hsmmc_suspend() 2210 disable_irq(host->wake_irq); omap_hsmmc_suspend() 2212 if (host->dbclk) omap_hsmmc_suspend() 2213 clk_disable_unprepare(host->dbclk); omap_hsmmc_suspend() 2215 pm_runtime_put_sync(host->dev); omap_hsmmc_suspend() 2222 struct omap_hsmmc_host *host = dev_get_drvdata(dev); omap_hsmmc_resume() local 2224 if (!host) omap_hsmmc_resume() 2227 pm_runtime_get_sync(host->dev); omap_hsmmc_resume() 2229 if (host->dbclk) omap_hsmmc_resume() 2230 
clk_prepare_enable(host->dbclk); omap_hsmmc_resume() 2232 if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) omap_hsmmc_resume() 2233 omap_hsmmc_conf_bus_power(host); omap_hsmmc_resume() 2235 omap_hsmmc_protect_card(host); omap_hsmmc_resume() 2237 if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && omap_hsmmc_resume() 2238 !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ)) omap_hsmmc_resume() 2239 enable_irq(host->wake_irq); omap_hsmmc_resume() 2241 pm_runtime_mark_last_busy(host->dev); omap_hsmmc_resume() 2242 pm_runtime_put_autosuspend(host->dev); omap_hsmmc_resume() 2249 struct omap_hsmmc_host *host; omap_hsmmc_runtime_suspend() local 2253 host = platform_get_drvdata(to_platform_device(dev)); omap_hsmmc_runtime_suspend() 2254 omap_hsmmc_context_save(host); omap_hsmmc_runtime_suspend() 2257 spin_lock_irqsave(&host->irq_lock, flags); omap_hsmmc_runtime_suspend() 2258 if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && omap_hsmmc_runtime_suspend() 2259 (host->flags & HSMMC_SDIO_IRQ_ENABLED)) { omap_hsmmc_runtime_suspend() 2261 OMAP_HSMMC_WRITE(host->base, ISE, 0); omap_hsmmc_runtime_suspend() 2262 OMAP_HSMMC_WRITE(host->base, IE, 0); omap_hsmmc_runtime_suspend() 2264 if (!(OMAP_HSMMC_READ(host->base, PSTATE) & DLEV_DAT(1))) { omap_hsmmc_runtime_suspend() 2271 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); omap_hsmmc_runtime_suspend() 2272 OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN); omap_hsmmc_runtime_suspend() 2273 OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN); omap_hsmmc_runtime_suspend() 2281 WARN_ON(host->flags & HSMMC_WAKE_IRQ_ENABLED); omap_hsmmc_runtime_suspend() 2282 enable_irq(host->wake_irq); omap_hsmmc_runtime_suspend() 2283 host->flags |= HSMMC_WAKE_IRQ_ENABLED; omap_hsmmc_runtime_suspend() 2289 spin_unlock_irqrestore(&host->irq_lock, flags); omap_hsmmc_runtime_suspend() 2295 struct omap_hsmmc_host *host; omap_hsmmc_runtime_resume() local 2298 host = platform_get_drvdata(to_platform_device(dev)); omap_hsmmc_runtime_resume() 2299 omap_hsmmc_context_restore(host); omap_hsmmc_runtime_resume() 2302 spin_lock_irqsave(&host->irq_lock, flags); omap_hsmmc_runtime_resume() 2303 if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && omap_hsmmc_runtime_resume() 2304 (host->flags & HSMMC_SDIO_IRQ_ENABLED)) { omap_hsmmc_runtime_resume() 2306 if (host->flags & HSMMC_WAKE_IRQ_ENABLED) { omap_hsmmc_runtime_resume() 2307 disable_irq_nosync(host->wake_irq); omap_hsmmc_runtime_resume() 2308 host->flags &= ~HSMMC_WAKE_IRQ_ENABLED; omap_hsmmc_runtime_resume() 2311 pinctrl_pm_select_default_state(host->dev); omap_hsmmc_runtime_resume() 2314 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); omap_hsmmc_runtime_resume() 2315 OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN); omap_hsmmc_runtime_resume() 2316 OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN); omap_hsmmc_runtime_resume() 2318 pinctrl_pm_select_default_state(host->dev); omap_hsmmc_runtime_resume() 2320 spin_unlock_irqrestore(&host->irq_lock, flags); omap_hsmmc_runtime_resume() 423 omap_hsmmc_gpio_init(struct mmc_host *mmc, struct omap_hsmmc_host *host, struct omap_hsmmc_platform_data *pdata) omap_hsmmc_gpio_init() argument
|
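The omap_hsmmc excerpt drives data transfers through the generic dmaengine API: omap_hsmmc_setup_dma_transfer() programs a slave config aimed at the controller's DATA register, maps the scatterlist, prepares a slave_sg descriptor and attaches a completion callback. The sketch below condenses that sequence under stated assumptions: 'fifo_phys' stands in for host->mapbase + OMAP_HSMMC_DATA, submit and issue_pending are combined into one helper, and the pre/post-request cookie optimisation, AUTO_CMD23 handling and most error unwinding are left out.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mmc/core.h>

static int start_sg_dma(struct dma_chan *chan, struct mmc_data *data,
                        dma_addr_t fifo_phys, dma_async_tx_callback done,
                        void *cb_param)
{
        enum dma_data_direction map_dir = data->flags & MMC_DATA_WRITE ?
                                          DMA_TO_DEVICE : DMA_FROM_DEVICE;
        struct dma_slave_config cfg = {
                .src_addr = fifo_phys,          /* controller data FIFO */
                .dst_addr = fifo_phys,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst = data->blksz / 4,
                .dst_maxburst = data->blksz / 4,
        };
        struct dma_async_tx_descriptor *tx;
        int ret, len;

        ret = dmaengine_slave_config(chan, &cfg);
        if (ret)
                return ret;

        len = dma_map_sg(chan->device->dev, data->sg, data->sg_len, map_dir);
        if (!len)
                return -ENOMEM;

        tx = dmaengine_prep_slave_sg(chan, data->sg, len,
                                     data->flags & MMC_DATA_WRITE ?
                                     DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
                                     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx) {
                /* the "prep_slave_sg() failed" case in the listing */
                dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
                             map_dir);
                return -EIO;
        }

        tx->callback = done;                    /* runs on transfer completion */
        tx->callback_param = cb_param;
        dmaengine_submit(tx);                   /* queued, not yet started */
        dma_async_issue_pending(chan);          /* kick the engine */
        return 0;
}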
H A D | toshsd.c | 24 #include <linux/mmc/host.h> 38 static void toshsd_init(struct toshsd_host *host) toshsd_init() argument 41 pci_write_config_byte(host->pdev, SD_PCICFG_CLKSTOP, toshsd_init() 43 pci_write_config_byte(host->pdev, SD_PCICFG_CARDDETECT, 2); toshsd_init() 46 iowrite16(0, host->ioaddr + SD_SOFTWARERESET); /* assert */ toshsd_init() 48 iowrite16(1, host->ioaddr + SD_SOFTWARERESET); /* deassert */ toshsd_init() 52 iowrite16(0, host->ioaddr + SD_CARDCLOCKCTRL); toshsd_init() 53 iowrite32(0, host->ioaddr + SD_CARDSTATUS); toshsd_init() 54 iowrite32(0, host->ioaddr + SD_ERRORSTATUS0); toshsd_init() 55 iowrite16(0, host->ioaddr + SD_STOPINTERNAL); toshsd_init() 58 iowrite16(0x100, host->ioaddr + SDIO_BASE + SDIO_CLOCKNWAITCTRL); toshsd_init() 61 pci_write_config_byte(host->pdev, SD_PCICFG_SDLED_ENABLE1, toshsd_init() 63 pci_write_config_byte(host->pdev, SD_PCICFG_SDLED_ENABLE2, toshsd_init() 71 host->ioaddr + SD_INTMASKCARD); toshsd_init() 73 iowrite16(0x1000, host->ioaddr + SD_TRANSACTIONCTRL); toshsd_init() 83 struct toshsd_host *host = mmc_priv(mmc); __toshsd_set_ios() local 95 pci_write_config_byte(host->pdev, SD_PCICFG_CLKMODE, __toshsd_set_ios() 99 pci_write_config_byte(host->pdev, SD_PCICFG_CLKMODE, 0); __toshsd_set_ios() 102 iowrite16(clk, host->ioaddr + SD_CARDCLOCKCTRL); __toshsd_set_ios() 106 iowrite16(0, host->ioaddr + SD_CARDCLOCKCTRL); __toshsd_set_ios() 110 pci_write_config_byte(host->pdev, SD_PCICFG_POWER1, __toshsd_set_ios() 117 pci_write_config_byte(host->pdev, SD_PCICFG_POWER1, __toshsd_set_ios() 119 pci_write_config_byte(host->pdev, SD_PCICFG_POWER2, __toshsd_set_ios() 130 host->ioaddr + SD_CARDOPTIONSETUP); __toshsd_set_ios() 136 host->ioaddr + SD_CARDOPTIONSETUP); __toshsd_set_ios() 141 static void toshsd_set_led(struct toshsd_host *host, unsigned char state) toshsd_set_led() argument 143 iowrite16(state, host->ioaddr + SDIO_BASE + SDIO_LEDCTRL); toshsd_set_led() 146 static void toshsd_finish_request(struct toshsd_host *host) toshsd_finish_request() argument 148 struct mmc_request *mrq = host->mrq; toshsd_finish_request() 151 host->mrq = NULL; toshsd_finish_request() 152 host->cmd = NULL; toshsd_finish_request() 153 host->data = NULL; toshsd_finish_request() 155 toshsd_set_led(host, 0); toshsd_finish_request() 156 mmc_request_done(host->mmc, mrq); toshsd_finish_request() 161 struct toshsd_host *host = dev_id; toshsd_thread_irq() local 162 struct mmc_data *data = host->data; toshsd_thread_irq() 163 struct sg_mapping_iter *sg_miter = &host->sg_miter; toshsd_thread_irq() 169 dev_warn(&host->pdev->dev, "Spurious Data IRQ\n"); toshsd_thread_irq() 170 if (host->cmd) { toshsd_thread_irq() 171 host->cmd->error = -EIO; toshsd_thread_irq() 172 toshsd_finish_request(host); toshsd_thread_irq() 176 spin_lock_irqsave(&host->lock, flags); toshsd_thread_irq() 190 dev_dbg(&host->pdev->dev, "count: %08x, flags %08x\n", count, toshsd_thread_irq() 195 ioread32_rep(host->ioaddr + SD_DATAPORT, buf, count >> 2); toshsd_thread_irq() 197 iowrite32_rep(host->ioaddr + SD_DATAPORT, buf, count >> 2); toshsd_thread_irq() 203 spin_unlock_irqrestore(&host->lock, flags); toshsd_thread_irq() 208 static void toshsd_cmd_irq(struct toshsd_host *host) toshsd_cmd_irq() argument 210 struct mmc_command *cmd = host->cmd; toshsd_cmd_irq() 214 if (!host->cmd) { toshsd_cmd_irq() 215 dev_warn(&host->pdev->dev, "Spurious CMD irq\n"); toshsd_cmd_irq() 219 host->cmd = NULL; toshsd_cmd_irq() 224 data = ioread16(host->ioaddr + SD_RESPONSE0); toshsd_cmd_irq() 227 data = ioread16(host->ioaddr + SD_RESPONSE1); 
toshsd_cmd_irq() 230 data = ioread16(host->ioaddr + SD_RESPONSE2); toshsd_cmd_irq() 233 data = ioread16(host->ioaddr + SD_RESPONSE3); toshsd_cmd_irq() 236 data = ioread16(host->ioaddr + SD_RESPONSE4); toshsd_cmd_irq() 239 data = ioread16(host->ioaddr + SD_RESPONSE5); toshsd_cmd_irq() 242 data = ioread16(host->ioaddr + SD_RESPONSE6); toshsd_cmd_irq() 245 data = ioread16(host->ioaddr + SD_RESPONSE7); toshsd_cmd_irq() 249 data = ioread16(host->ioaddr + SD_RESPONSE0); toshsd_cmd_irq() 252 data = ioread16(host->ioaddr + SD_RESPONSE1); toshsd_cmd_irq() 257 dev_dbg(&host->pdev->dev, "Command IRQ complete %d %d %x\n", toshsd_cmd_irq() 262 if (host->data) toshsd_cmd_irq() 265 toshsd_finish_request(host); toshsd_cmd_irq() 268 static void toshsd_data_end_irq(struct toshsd_host *host) toshsd_data_end_irq() argument 270 struct mmc_data *data = host->data; toshsd_data_end_irq() 272 host->data = NULL; toshsd_data_end_irq() 275 dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); toshsd_data_end_irq() 284 dev_dbg(&host->pdev->dev, "Completed data request xfr=%d\n", toshsd_data_end_irq() 287 iowrite16(0, host->ioaddr + SD_STOPINTERNAL); toshsd_data_end_irq() 289 toshsd_finish_request(host); toshsd_data_end_irq() 294 struct toshsd_host *host = dev_id; toshsd_irq() local 298 spin_lock(&host->lock); toshsd_irq() 299 int_status = ioread32(host->ioaddr + SD_CARDSTATUS); toshsd_irq() 300 int_mask = ioread32(host->ioaddr + SD_INTMASKCARD); toshsd_irq() 303 dev_dbg(&host->pdev->dev, "IRQ status:%x mask:%x\n", toshsd_irq() 314 dev_dbg(&host->pdev->dev, "Timeout\n"); toshsd_irq() 317 dev_err(&host->pdev->dev, "BadCRC\n"); toshsd_irq() 324 dev_err(&host->pdev->dev, "Buffer status error: { %s%s%s%s%s%s}\n", toshsd_irq() 332 detail = ioread32(host->ioaddr + SD_ERRORSTATUS0); toshsd_irq() 333 dev_err(&host->pdev->dev, "detail error status { %s%s%s%s%s%s%s%s%s%s%s%s%s}\n", toshsd_irq() 351 if (host->cmd) toshsd_irq() 352 host->cmd->error = error; toshsd_irq() 357 host->ioaddr + SD_CARDSTATUS); toshsd_irq() 359 toshsd_init(host); toshsd_irq() 360 __toshsd_set_ios(host->mmc, &host->mmc->ios); toshsd_irq() 369 host->ioaddr + SD_CARDSTATUS); toshsd_irq() 372 toshsd_init(host); toshsd_irq() 374 mmc_detect_change(host->mmc, 1); toshsd_irq() 381 host->ioaddr + SD_CARDSTATUS); toshsd_irq() 390 host->ioaddr + SD_CARDSTATUS); toshsd_irq() 391 toshsd_cmd_irq(host); toshsd_irq() 397 host->ioaddr + SD_CARDSTATUS); toshsd_irq() 398 toshsd_data_end_irq(host); toshsd_irq() 401 spin_unlock(&host->lock); toshsd_irq() 405 static void toshsd_start_cmd(struct toshsd_host *host, struct mmc_command *cmd) toshsd_start_cmd() argument 407 struct mmc_data *data = host->data; toshsd_start_cmd() 410 dev_dbg(&host->pdev->dev, "Command opcode: %d\n", cmd->opcode); toshsd_start_cmd() 414 host->ioaddr + SD_STOPINTERNAL); toshsd_start_cmd() 421 toshsd_finish_request(host); toshsd_start_cmd() 444 dev_err(&host->pdev->dev, "Unknown response type %d\n", toshsd_start_cmd() 449 host->cmd = cmd; toshsd_start_cmd() 462 host->ioaddr + SD_STOPINTERNAL); toshsd_start_cmd() 473 iowrite32(cmd->arg, host->ioaddr + SD_ARG0); toshsd_start_cmd() 474 iowrite16(c, host->ioaddr + SD_CMD); toshsd_start_cmd() 477 static void toshsd_start_data(struct toshsd_host *host, struct mmc_data *data) toshsd_start_data() argument 481 dev_dbg(&host->pdev->dev, "setup data transfer: blocksize %08x nr_blocks %d, offset: %08x\n", toshsd_start_data() 484 host->data = data; toshsd_start_data() 491 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); toshsd_start_data() 494 
iowrite16(data->blocks, host->ioaddr + SD_BLOCKCOUNT); toshsd_start_data() 495 iowrite16(data->blksz, host->ioaddr + SD_CARDXFERDATALEN); toshsd_start_data() 501 struct toshsd_host *host = mmc_priv(mmc); toshsd_request() local 505 if (!(ioread16(host->ioaddr + SD_CARDSTATUS) & SD_CARD_PRESENT_0)) { toshsd_request() 511 spin_lock_irqsave(&host->lock, flags); toshsd_request() 513 WARN_ON(host->mrq != NULL); toshsd_request() 515 host->mrq = mrq; toshsd_request() 518 toshsd_start_data(host, mrq->data); toshsd_request() 520 toshsd_set_led(host, 1); toshsd_request() 522 toshsd_start_cmd(host, mrq->cmd); toshsd_request() 524 spin_unlock_irqrestore(&host->lock, flags); toshsd_request() 529 struct toshsd_host *host = mmc_priv(mmc); toshsd_set_ios() local 532 spin_lock_irqsave(&host->lock, flags); toshsd_set_ios() 534 spin_unlock_irqrestore(&host->lock, flags); toshsd_set_ios() 539 struct toshsd_host *host = mmc_priv(mmc); toshsd_get_ro() local 542 return !(ioread16(host->ioaddr + SD_CARDSTATUS) & SD_CARD_WRITE_PROTECT); toshsd_get_ro() 547 struct toshsd_host *host = mmc_priv(mmc); toshsd_get_cd() local 549 return !!(ioread16(host->ioaddr + SD_CARDSTATUS) & SD_CARD_PRESENT_0); toshsd_get_cd() 560 static void toshsd_powerdown(struct toshsd_host *host) toshsd_powerdown() argument 563 iowrite32(0xffffffff, host->ioaddr + SD_INTMASKCARD); toshsd_powerdown() 565 iowrite16(0x000, host->ioaddr + SDIO_BASE + SDIO_CLOCKNWAITCTRL); toshsd_powerdown() 566 iowrite16(0, host->ioaddr + SD_CARDCLOCKCTRL); toshsd_powerdown() 568 pci_write_config_byte(host->pdev, SD_PCICFG_POWER1, SD_PCICFG_PWR1_OFF); toshsd_powerdown() 570 pci_write_config_byte(host->pdev, SD_PCICFG_CLKSTOP, 0); toshsd_powerdown() 577 struct toshsd_host *host = pci_get_drvdata(pdev); toshsd_pm_suspend() local 579 toshsd_powerdown(host); toshsd_pm_suspend() 592 struct toshsd_host *host = pci_get_drvdata(pdev); toshsd_pm_resume() local 601 toshsd_init(host); toshsd_pm_resume() 610 struct toshsd_host *host; toshsd_probe() local 624 host = mmc_priv(mmc); toshsd_probe() 625 host->mmc = mmc; toshsd_probe() 627 host->pdev = pdev; toshsd_probe() 628 pci_set_drvdata(pdev, host); toshsd_probe() 634 host->ioaddr = pci_iomap(pdev, 0, 0); toshsd_probe() 635 if (!host->ioaddr) { toshsd_probe() 640 /* Set MMC host parameters */ toshsd_probe() 648 spin_lock_init(&host->lock); toshsd_probe() 650 toshsd_init(host); toshsd_probe() 653 IRQF_SHARED, DRIVER_NAME, host); toshsd_probe() 667 pci_iounmap(pdev, host->ioaddr); toshsd_probe() 680 struct toshsd_host *host = pci_get_drvdata(pdev); toshsd_remove() local 682 mmc_remove_host(host->mmc); toshsd_remove() 683 toshsd_powerdown(host); toshsd_remove() 684 free_irq(pdev->irq, host); toshsd_remove() 685 pci_iounmap(pdev, host->ioaddr); toshsd_remove() 687 mmc_free_host(host->mmc); toshsd_remove()
|
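
Note: the toshsd.c excerpt above drains and fills the controller's 32-bit data port from the request's scatterlist using an sg_mapping_iter. Below is a minimal sketch of that PIO pattern, not the driver's own code: the MY_SD_DATAPORT offset, the my_host layout, and the assumption that one block fits inside the chunk mapped by sg_miter_next() are all illustrative.

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/scatterlist.h>

#define MY_SD_DATAPORT	0x30	/* assumed register offset, illustration only */

struct my_host {
	void __iomem *ioaddr;
	/* assumed to have been started with SG_MITER_TO_SG or SG_MITER_FROM_SG */
	struct sg_mapping_iter sg_miter;
};

/* Move one block through the 32-bit data port via the scatterlist iterator. */
static void my_pio_one_block(struct my_host *host, bool read, unsigned int blksz)
{
	struct sg_mapping_iter *miter = &host->sg_miter;
	unsigned int count;
	u32 *buf;

	if (!sg_miter_next(miter))
		return;				/* scatterlist exhausted */

	buf = miter->addr;			/* kernel mapping of this chunk */
	count = min_t(unsigned int, blksz, miter->length);

	if (read)
		ioread32_rep(host->ioaddr + MY_SD_DATAPORT, buf, count >> 2);
	else
		iowrite32_rep(host->ioaddr + MY_SD_DATAPORT, buf, count >> 2);

	miter->consumed = count;
	sg_miter_stop(miter);			/* unmap and flush the chunk */
}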
H A D | davinci_mmc.c | 29 #include <linux/mmc/host.h> 231 static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) mmc_davinci_sg_to_buf() argument 233 host->buffer_bytes_left = sg_dma_len(host->sg); mmc_davinci_sg_to_buf() 234 host->buffer = sg_virt(host->sg); mmc_davinci_sg_to_buf() 235 if (host->buffer_bytes_left > host->bytes_left) mmc_davinci_sg_to_buf() 236 host->buffer_bytes_left = host->bytes_left; mmc_davinci_sg_to_buf() 239 static void davinci_fifo_data_trans(struct mmc_davinci_host *host, davinci_fifo_data_trans() argument 245 if (host->buffer_bytes_left == 0) { davinci_fifo_data_trans() 246 host->sg = sg_next(host->data->sg); davinci_fifo_data_trans() 247 mmc_davinci_sg_to_buf(host); davinci_fifo_data_trans() 250 p = host->buffer; davinci_fifo_data_trans() 251 if (n > host->buffer_bytes_left) davinci_fifo_data_trans() 252 n = host->buffer_bytes_left; davinci_fifo_data_trans() 253 host->buffer_bytes_left -= n; davinci_fifo_data_trans() 254 host->bytes_left -= n; davinci_fifo_data_trans() 260 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { davinci_fifo_data_trans() 262 writel(*((u32 *)p), host->base + DAVINCI_MMCDXR); davinci_fifo_data_trans() 266 iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3)); davinci_fifo_data_trans() 271 *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR); davinci_fifo_data_trans() 275 ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3)); davinci_fifo_data_trans() 279 host->buffer = p; davinci_fifo_data_trans() 282 static void mmc_davinci_start_command(struct mmc_davinci_host *host, mmc_davinci_start_command() argument 288 dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n", mmc_davinci_start_command() 308 host->cmd = cmd; mmc_davinci_start_command() 329 dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n", mmc_davinci_start_command() 338 if (host->do_dma) mmc_davinci_start_command() 341 if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL && mmc_davinci_start_command() 342 host->data_dir == DAVINCI_MMC_DATADIR_READ) mmc_davinci_start_command() 354 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) mmc_davinci_start_command() 357 if (host->bus_mode == MMC_BUSMODE_PUSHPULL) mmc_davinci_start_command() 361 writel(0x1FFF, host->base + DAVINCI_MMCTOR); mmc_davinci_start_command() 365 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { mmc_davinci_start_command() 368 if (!host->do_dma) mmc_davinci_start_command() 370 } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) { mmc_davinci_start_command() 373 if (!host->do_dma) mmc_davinci_start_command() 381 if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)) mmc_davinci_start_command() 382 davinci_fifo_data_trans(host, rw_threshold); mmc_davinci_start_command() 384 writel(cmd->arg, host->base + DAVINCI_MMCARGHL); mmc_davinci_start_command() 385 writel(cmd_reg, host->base + DAVINCI_MMCCMD); mmc_davinci_start_command() 387 host->active_request = true; mmc_davinci_start_command() 389 if (!host->do_dma && host->bytes_left <= poll_threshold) { mmc_davinci_start_command() 392 while (host->active_request && count--) { mmc_davinci_start_command() 393 mmc_davinci_irq(0, host); mmc_davinci_start_command() 398 if (host->active_request) mmc_davinci_start_command() 399 writel(im_val, host->base + DAVINCI_MMCIM); mmc_davinci_start_command() 406 static void davinci_abort_dma(struct mmc_davinci_host *host) davinci_abort_dma() argument 410 if (host->data_dir == DAVINCI_MMC_DATADIR_READ) davinci_abort_dma() 411 sync_dev = host->dma_rx; davinci_abort_dma() 413 sync_dev = host->dma_tx; 
davinci_abort_dma() 418 static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host, mmc_davinci_send_dma_request() argument 425 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { mmc_davinci_send_dma_request() 428 .dst_addr = host->mem_res->start + DAVINCI_MMCDXR, mmc_davinci_send_dma_request() 433 chan = host->dma_tx; mmc_davinci_send_dma_request() 434 dmaengine_slave_config(host->dma_tx, &dma_tx_conf); mmc_davinci_send_dma_request() 436 desc = dmaengine_prep_slave_sg(host->dma_tx, mmc_davinci_send_dma_request() 438 host->sg_len, mmc_davinci_send_dma_request() 442 dev_dbg(mmc_dev(host->mmc), mmc_davinci_send_dma_request() 450 .src_addr = host->mem_res->start + DAVINCI_MMCDRR, mmc_davinci_send_dma_request() 455 chan = host->dma_rx; mmc_davinci_send_dma_request() 456 dmaengine_slave_config(host->dma_rx, &dma_rx_conf); mmc_davinci_send_dma_request() 458 desc = dmaengine_prep_slave_sg(host->dma_rx, mmc_davinci_send_dma_request() 460 host->sg_len, mmc_davinci_send_dma_request() 464 dev_dbg(mmc_dev(host->mmc), mmc_davinci_send_dma_request() 478 static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, mmc_davinci_start_dma_transfer() argument 485 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, mmc_davinci_start_dma_transfer() 491 for (i = 0; i < host->sg_len; i++) { mmc_davinci_start_dma_transfer() 493 dma_unmap_sg(mmc_dev(host->mmc), mmc_davinci_start_dma_transfer() 502 host->do_dma = 1; mmc_davinci_start_dma_transfer() 503 ret = mmc_davinci_send_dma_request(host, data); mmc_davinci_start_dma_transfer() 509 davinci_release_dma_channels(struct mmc_davinci_host *host) davinci_release_dma_channels() argument 511 if (!host->use_dma) davinci_release_dma_channels() 514 dma_release_channel(host->dma_tx); davinci_release_dma_channels() 515 dma_release_channel(host->dma_rx); davinci_release_dma_channels() 518 static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) davinci_acquire_dma_channels() argument 526 host->dma_tx = davinci_acquire_dma_channels() 528 &host->txdma, mmc_dev(host->mmc), "tx"); davinci_acquire_dma_channels() 529 if (!host->dma_tx) { davinci_acquire_dma_channels() 530 dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n"); davinci_acquire_dma_channels() 534 host->dma_rx = davinci_acquire_dma_channels() 536 &host->rxdma, mmc_dev(host->mmc), "rx"); davinci_acquire_dma_channels() 537 if (!host->dma_rx) { davinci_acquire_dma_channels() 538 dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n"); davinci_acquire_dma_channels() 546 dma_release_channel(host->dma_tx); davinci_acquire_dma_channels() 554 mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req) mmc_davinci_prepare_data() argument 560 if (host->version == MMC_CTLR_VERSION_2) mmc_davinci_prepare_data() 563 host->data = data; mmc_davinci_prepare_data() 565 host->data_dir = DAVINCI_MMC_DATADIR_NONE; mmc_davinci_prepare_data() 566 writel(0, host->base + DAVINCI_MMCBLEN); mmc_davinci_prepare_data() 567 writel(0, host->base + DAVINCI_MMCNBLK); mmc_davinci_prepare_data() 571 dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n", mmc_davinci_prepare_data() 575 dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n", mmc_davinci_prepare_data() 578 (data->timeout_ns / host->ns_in_one_cycle); mmc_davinci_prepare_data() 582 writel(timeout, host->base + DAVINCI_MMCTOD); mmc_davinci_prepare_data() 583 writel(data->blocks, host->base + DAVINCI_MMCNBLK); mmc_davinci_prepare_data() 584 writel(data->blksz, host->base + DAVINCI_MMCBLEN); 
mmc_davinci_prepare_data() 589 host->data_dir = DAVINCI_MMC_DATADIR_WRITE; mmc_davinci_prepare_data() 591 host->base + DAVINCI_MMCFIFOCTL); mmc_davinci_prepare_data() 593 host->base + DAVINCI_MMCFIFOCTL); mmc_davinci_prepare_data() 597 host->data_dir = DAVINCI_MMC_DATADIR_READ; mmc_davinci_prepare_data() 599 host->base + DAVINCI_MMCFIFOCTL); mmc_davinci_prepare_data() 601 host->base + DAVINCI_MMCFIFOCTL); mmc_davinci_prepare_data() 605 host->buffer = NULL; mmc_davinci_prepare_data() 606 host->bytes_left = data->blocks * data->blksz; mmc_davinci_prepare_data() 616 if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0 mmc_davinci_prepare_data() 617 && mmc_davinci_start_dma_transfer(host, data) == 0) { mmc_davinci_prepare_data() 619 host->bytes_left = 0; mmc_davinci_prepare_data() 622 host->sg_len = data->sg_len; mmc_davinci_prepare_data() 623 host->sg = host->data->sg; mmc_davinci_prepare_data() 624 mmc_davinci_sg_to_buf(host); mmc_davinci_prepare_data() 630 struct mmc_davinci_host *host = mmc_priv(mmc); mmc_davinci_request() local 638 mmcst1 = readl(host->base + DAVINCI_MMCST1); mmc_davinci_request() 644 dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n"); mmc_davinci_request() 650 host->do_dma = 0; mmc_davinci_request() 651 mmc_davinci_prepare_data(host, req); mmc_davinci_request() 652 mmc_davinci_start_command(host, req->cmd); mmc_davinci_request() 655 static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host, calculate_freq_for_card() argument 660 mmc_pclk = host->mmc_input_clk; calculate_freq_for_card() 674 host->ns_in_one_cycle = (1000000) / (((mmc_pclk calculate_freq_for_card() 677 host->ns_in_one_cycle = (1000000) / (((mmc_pclk calculate_freq_for_card() 687 struct mmc_davinci_host *host = mmc_priv(mmc); calculate_clk_divider() local 701 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; calculate_clk_divider() 703 writel(temp, host->base + DAVINCI_MMCCLK); calculate_clk_divider() 706 host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000); calculate_clk_divider() 709 mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock); calculate_clk_divider() 714 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN; calculate_clk_divider() 715 writel(temp, host->base + DAVINCI_MMCCLK); calculate_clk_divider() 719 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; calculate_clk_divider() 721 writel(temp, host->base + DAVINCI_MMCCLK); calculate_clk_divider() 723 writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); calculate_clk_divider() 731 struct mmc_davinci_host *host = mmc_priv(mmc); mmc_davinci_set_ios() local 735 dev_dbg(mmc_dev(host->mmc), mmc_davinci_set_ios() 753 dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n"); mmc_davinci_set_ios() 754 writel((readl(host->base + DAVINCI_MMCCTL) & mmc_davinci_set_ios() 756 host->base + DAVINCI_MMCCTL); mmc_davinci_set_ios() 759 dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n"); mmc_davinci_set_ios() 760 if (host->version == MMC_CTLR_VERSION_2) mmc_davinci_set_ios() 761 writel((readl(host->base + DAVINCI_MMCCTL) & mmc_davinci_set_ios() 763 host->base + DAVINCI_MMCCTL); mmc_davinci_set_ios() 765 writel(readl(host->base + DAVINCI_MMCCTL) | mmc_davinci_set_ios() 767 host->base + DAVINCI_MMCCTL); mmc_davinci_set_ios() 770 dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n"); mmc_davinci_set_ios() 771 if (host->version == MMC_CTLR_VERSION_2) mmc_davinci_set_ios() 772 writel(readl(host->base + DAVINCI_MMCCTL) & mmc_davinci_set_ios() 774 host->base + DAVINCI_MMCCTL); 
mmc_davinci_set_ios() 776 writel(readl(host->base + DAVINCI_MMCCTL) & mmc_davinci_set_ios() 778 host->base + DAVINCI_MMCCTL); mmc_davinci_set_ios() 784 host->bus_mode = ios->bus_mode; mmc_davinci_set_ios() 790 writel(0, host->base + DAVINCI_MMCARGHL); mmc_davinci_set_ios() 791 writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD); mmc_davinci_set_ios() 793 u32 tmp = readl(host->base + DAVINCI_MMCST0); mmc_davinci_set_ios() 802 dev_warn(mmc_dev(host->mmc), "powerup timeout\n"); mmc_davinci_set_ios() 809 mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data) mmc_davinci_xfer_done() argument 811 host->data = NULL; mmc_davinci_xfer_done() 813 if (host->mmc->caps & MMC_CAP_SDIO_IRQ) { mmc_davinci_xfer_done() 819 if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) & mmc_davinci_xfer_done() 821 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); mmc_davinci_xfer_done() 822 mmc_signal_sdio_irq(host->mmc); mmc_davinci_xfer_done() 826 if (host->do_dma) { mmc_davinci_xfer_done() 827 davinci_abort_dma(host); mmc_davinci_xfer_done() 829 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, mmc_davinci_xfer_done() 833 host->do_dma = false; mmc_davinci_xfer_done() 835 host->data_dir = DAVINCI_MMC_DATADIR_NONE; mmc_davinci_xfer_done() 837 if (!data->stop || (host->cmd && host->cmd->error)) { mmc_davinci_xfer_done() 838 mmc_request_done(host->mmc, data->mrq); mmc_davinci_xfer_done() 839 writel(0, host->base + DAVINCI_MMCIM); mmc_davinci_xfer_done() 840 host->active_request = false; mmc_davinci_xfer_done() 842 mmc_davinci_start_command(host, data->stop); mmc_davinci_xfer_done() 845 static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, mmc_davinci_cmd_done() argument 848 host->cmd = NULL; mmc_davinci_cmd_done() 853 cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01); mmc_davinci_cmd_done() 854 cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23); mmc_davinci_cmd_done() 855 cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45); mmc_davinci_cmd_done() 856 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); mmc_davinci_cmd_done() 859 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); mmc_davinci_cmd_done() 863 if (host->data == NULL || cmd->error) { mmc_davinci_cmd_done() 866 mmc_request_done(host->mmc, cmd->mrq); mmc_davinci_cmd_done() 867 writel(0, host->base + DAVINCI_MMCIM); mmc_davinci_cmd_done() 868 host->active_request = false; mmc_davinci_cmd_done() 872 static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host, mmc_davinci_reset_ctrl() argument 877 temp = readl(host->base + DAVINCI_MMCCTL); mmc_davinci_reset_ctrl() 883 writel(temp, host->base + DAVINCI_MMCCTL); mmc_davinci_reset_ctrl() 888 davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) davinci_abort_data() argument 890 mmc_davinci_reset_ctrl(host, 1); davinci_abort_data() 891 mmc_davinci_reset_ctrl(host, 0); davinci_abort_data() 896 struct mmc_davinci_host *host = dev_id; mmc_davinci_sdio_irq() local 899 status = readl(host->base + DAVINCI_SDIOIST); mmc_davinci_sdio_irq() 901 dev_dbg(mmc_dev(host->mmc), mmc_davinci_sdio_irq() 903 writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); mmc_davinci_sdio_irq() 904 mmc_signal_sdio_irq(host->mmc); mmc_davinci_sdio_irq() 911 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id; mmc_davinci_irq() local 915 struct mmc_data *data = host->data; mmc_davinci_irq() 917 if (host->cmd == NULL && host->data == NULL) { mmc_davinci_irq() 918 status = readl(host->base + DAVINCI_MMCST0); mmc_davinci_irq() 919 
dev_dbg(mmc_dev(host->mmc), mmc_davinci_irq() 922 writel(0, host->base + DAVINCI_MMCIM); mmc_davinci_irq() 926 status = readl(host->base + DAVINCI_MMCST0); mmc_davinci_irq() 936 if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { mmc_davinci_irq() 946 im_val = readl(host->base + DAVINCI_MMCIM); mmc_davinci_irq() 947 writel(0, host->base + DAVINCI_MMCIM); mmc_davinci_irq() 950 davinci_fifo_data_trans(host, rw_threshold); mmc_davinci_irq() 951 status = readl(host->base + DAVINCI_MMCST0); mmc_davinci_irq() 953 } while (host->bytes_left && mmc_davinci_irq() 962 writel(im_val, host->base + DAVINCI_MMCIM); mmc_davinci_irq() 968 if ((host->do_dma == 0) && (host->bytes_left > 0)) { mmc_davinci_irq() 972 davinci_fifo_data_trans(host, host->bytes_left); mmc_davinci_irq() 977 dev_err(mmc_dev(host->mmc), mmc_davinci_irq() 978 "DATDNE with no host->data\n"); mmc_davinci_irq() 987 dev_dbg(mmc_dev(host->mmc), mmc_davinci_irq() 991 davinci_abort_data(host, data); mmc_davinci_irq() 1006 u32 temp = readb(host->base + DAVINCI_MMCDRSP); mmc_davinci_irq() 1011 dev_dbg(mmc_dev(host->mmc), "data %s %s error\n", mmc_davinci_irq() 1015 davinci_abort_data(host, data); mmc_davinci_irq() 1020 if (host->cmd) { mmc_davinci_irq() 1021 dev_dbg(mmc_dev(host->mmc), mmc_davinci_irq() 1023 host->cmd->opcode, qstatus); mmc_davinci_irq() 1024 host->cmd->error = -ETIMEDOUT; mmc_davinci_irq() 1027 davinci_abort_data(host, data); mmc_davinci_irq() 1035 dev_dbg(mmc_dev(host->mmc), "Command CRC error\n"); mmc_davinci_irq() 1036 if (host->cmd) { mmc_davinci_irq() 1037 host->cmd->error = -EILSEQ; mmc_davinci_irq() 1044 end_command = (int) host->cmd; mmc_davinci_irq() 1048 mmc_davinci_cmd_done(host, host->cmd); mmc_davinci_irq() 1050 mmc_davinci_xfer_done(host, data); mmc_davinci_irq() 1076 struct mmc_davinci_host *host = mmc_priv(mmc); mmc_davinci_enable_sdio_irq() local 1079 if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) { mmc_davinci_enable_sdio_irq() 1080 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); mmc_davinci_enable_sdio_irq() 1081 mmc_signal_sdio_irq(host->mmc); mmc_davinci_enable_sdio_irq() 1083 host->sdio_int = true; mmc_davinci_enable_sdio_irq() 1084 writel(readl(host->base + DAVINCI_SDIOIEN) | mmc_davinci_enable_sdio_irq() 1085 SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN); mmc_davinci_enable_sdio_irq() 1088 host->sdio_int = false; mmc_davinci_enable_sdio_irq() 1089 writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN, mmc_davinci_enable_sdio_irq() 1090 host->base + DAVINCI_SDIOIEN); mmc_davinci_enable_sdio_irq() 1108 struct mmc_davinci_host *host; mmc_davinci_cpufreq_transition() local 1113 host = container_of(nb, struct mmc_davinci_host, freq_transition); mmc_davinci_cpufreq_transition() 1114 mmc = host->mmc; mmc_davinci_cpufreq_transition() 1115 mmc_pclk = clk_get_rate(host->clk); mmc_davinci_cpufreq_transition() 1119 host->mmc_input_clk = mmc_pclk; mmc_davinci_cpufreq_transition() 1127 static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) mmc_davinci_cpufreq_register() argument 1129 host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition; mmc_davinci_cpufreq_register() 1131 return cpufreq_register_notifier(&host->freq_transition, mmc_davinci_cpufreq_register() 1135 static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) mmc_davinci_cpufreq_deregister() argument 1137 cpufreq_unregister_notifier(&host->freq_transition, mmc_davinci_cpufreq_deregister() 1141 static inline int mmc_davinci_cpufreq_register(struct 
mmc_davinci_host *host) mmc_davinci_cpufreq_register() argument 1146 static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) mmc_davinci_cpufreq_deregister() argument 1150 static void __init init_mmcsd_host(struct mmc_davinci_host *host) init_mmcsd_host() argument 1153 mmc_davinci_reset_ctrl(host, 1); init_mmcsd_host() 1155 writel(0, host->base + DAVINCI_MMCCLK); init_mmcsd_host() 1156 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); init_mmcsd_host() 1158 writel(0x1FFF, host->base + DAVINCI_MMCTOR); init_mmcsd_host() 1159 writel(0xFFFF, host->base + DAVINCI_MMCTOD); init_mmcsd_host() 1161 mmc_davinci_reset_ctrl(host, 0); init_mmcsd_host() 1232 struct mmc_davinci_host *host = NULL; davinci_mmcsd_probe() local 1262 host = mmc_priv(mmc); davinci_mmcsd_probe() 1263 host->mmc = mmc; /* Important */ davinci_mmcsd_probe() 1269 host->rxdma = r->start; davinci_mmcsd_probe() 1275 host->txdma = r->start; davinci_mmcsd_probe() 1277 host->mem_res = mem; davinci_mmcsd_probe() 1278 host->base = ioremap(mem->start, mem_size); davinci_mmcsd_probe() 1279 if (!host->base) davinci_mmcsd_probe() 1283 host->clk = clk_get(&pdev->dev, "MMCSDCLK"); davinci_mmcsd_probe() 1284 if (IS_ERR(host->clk)) { davinci_mmcsd_probe() 1285 ret = PTR_ERR(host->clk); davinci_mmcsd_probe() 1288 clk_enable(host->clk); davinci_mmcsd_probe() 1289 host->mmc_input_clk = clk_get_rate(host->clk); davinci_mmcsd_probe() 1291 init_mmcsd_host(host); davinci_mmcsd_probe() 1294 host->nr_sg = pdata->nr_sg - 1; davinci_mmcsd_probe() 1296 if (host->nr_sg > MAX_NR_SG || !host->nr_sg) davinci_mmcsd_probe() 1297 host->nr_sg = MAX_NR_SG; davinci_mmcsd_probe() 1299 host->use_dma = use_dma; davinci_mmcsd_probe() 1300 host->mmc_irq = irq; davinci_mmcsd_probe() 1301 host->sdio_irq = platform_get_irq(pdev, 1); davinci_mmcsd_probe() 1303 if (host->use_dma && davinci_acquire_dma_channels(host) != 0) davinci_mmcsd_probe() 1304 host->use_dma = 0; davinci_mmcsd_probe() 1318 host->version = id_entry->driver_data; davinci_mmcsd_probe() 1343 dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs); davinci_mmcsd_probe() 1344 dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size); davinci_mmcsd_probe() 1345 dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size); davinci_mmcsd_probe() 1346 dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size); davinci_mmcsd_probe() 1348 platform_set_drvdata(pdev, host); davinci_mmcsd_probe() 1350 ret = mmc_davinci_cpufreq_register(host); davinci_mmcsd_probe() 1360 ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host); davinci_mmcsd_probe() 1364 if (host->sdio_irq >= 0) { davinci_mmcsd_probe() 1365 ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0, davinci_mmcsd_probe() 1366 mmc_hostname(mmc), host); davinci_mmcsd_probe() 1373 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", davinci_mmcsd_probe() 1374 host->use_dma ? 
"DMA" : "PIO", davinci_mmcsd_probe() 1380 mmc_davinci_cpufreq_deregister(host); davinci_mmcsd_probe() 1382 if (host) { davinci_mmcsd_probe() 1383 davinci_release_dma_channels(host); davinci_mmcsd_probe() 1385 if (host->clk) { davinci_mmcsd_probe() 1386 clk_disable(host->clk); davinci_mmcsd_probe() 1387 clk_put(host->clk); davinci_mmcsd_probe() 1390 if (host->base) davinci_mmcsd_probe() 1391 iounmap(host->base); davinci_mmcsd_probe() 1407 struct mmc_davinci_host *host = platform_get_drvdata(pdev); davinci_mmcsd_remove() local 1409 if (host) { davinci_mmcsd_remove() 1410 mmc_davinci_cpufreq_deregister(host); davinci_mmcsd_remove() 1412 mmc_remove_host(host->mmc); davinci_mmcsd_remove() 1413 free_irq(host->mmc_irq, host); davinci_mmcsd_remove() 1414 if (host->mmc->caps & MMC_CAP_SDIO_IRQ) davinci_mmcsd_remove() 1415 free_irq(host->sdio_irq, host); davinci_mmcsd_remove() 1417 davinci_release_dma_channels(host); davinci_mmcsd_remove() 1419 clk_disable(host->clk); davinci_mmcsd_remove() 1420 clk_put(host->clk); davinci_mmcsd_remove() 1422 iounmap(host->base); davinci_mmcsd_remove() 1424 release_resource(host->mem_res); davinci_mmcsd_remove() 1426 mmc_free_host(host->mmc); davinci_mmcsd_remove() 1436 struct mmc_davinci_host *host = platform_get_drvdata(pdev); davinci_mmcsd_suspend() local 1438 writel(0, host->base + DAVINCI_MMCIM); davinci_mmcsd_suspend() 1439 mmc_davinci_reset_ctrl(host, 1); davinci_mmcsd_suspend() 1440 clk_disable(host->clk); davinci_mmcsd_suspend() 1448 struct mmc_davinci_host *host = platform_get_drvdata(pdev); davinci_mmcsd_resume() local 1450 clk_enable(host->clk); davinci_mmcsd_resume() 1451 mmc_davinci_reset_ctrl(host, 0); davinci_mmcsd_resume()
|
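
Note: the davinci_mmc.c excerpt drives its data FIFO through the generic dmaengine slave API. The following is a minimal sketch of that pattern for the write direction only; the fifo_addr parameter, burst size and error codes are assumptions for illustration, not the driver's values. It shows the configure / prep_slave_sg / submit / issue_pending sequence the excerpt follows.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int my_mmc_dma_write(struct dma_chan *chan, dma_addr_t fifo_addr,
			    struct scatterlist *sg, unsigned int sg_len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,		/* controller TX FIFO, assumed */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,			/* assumed burst size */
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	desc = dmaengine_prep_slave_sg(chan, sg, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;			/* could not build the descriptor */

	dmaengine_submit(desc);			/* queue it ... */
	dma_async_issue_pending(chan);		/* ... and start the transfer */
	return 0;
}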
H A D | au1xmmc.c | 2 * linux/drivers/mmc/host/au1xmmc.c - AU1XX0 MMC driver 44 #include <linux/mmc/host.h> 125 /* Status flags used by the host structure */ 165 static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask) IRQ_ON() argument 167 u32 val = __raw_readl(HOST_CONFIG(host)); IRQ_ON() 169 __raw_writel(val, HOST_CONFIG(host)); IRQ_ON() 173 static inline void FLUSH_FIFO(struct au1xmmc_host *host) FLUSH_FIFO() argument 175 u32 val = __raw_readl(HOST_CONFIG2(host)); FLUSH_FIFO() 177 __raw_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host)); FLUSH_FIFO() 184 __raw_writel(val, HOST_CONFIG2(host)); FLUSH_FIFO() 188 static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask) IRQ_OFF() argument 190 u32 val = __raw_readl(HOST_CONFIG(host)); IRQ_OFF() 192 __raw_writel(val, HOST_CONFIG(host)); IRQ_OFF() 196 static inline void SEND_STOP(struct au1xmmc_host *host) SEND_STOP() argument 200 WARN_ON(host->status != HOST_S_DATA); SEND_STOP() 201 host->status = HOST_S_STOP; SEND_STOP() 203 config2 = __raw_readl(HOST_CONFIG2(host)); SEND_STOP() 204 __raw_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host)); SEND_STOP() 208 __raw_writel(STOP_CMD, HOST_CMD(host)); SEND_STOP() 212 static void au1xmmc_set_power(struct au1xmmc_host *host, int state) au1xmmc_set_power() argument 214 if (host->platdata && host->platdata->set_power) au1xmmc_set_power() 215 host->platdata->set_power(host->mmc, state); au1xmmc_set_power() 220 struct au1xmmc_host *host = mmc_priv(mmc); au1xmmc_card_inserted() local 222 if (host->platdata && host->platdata->card_inserted) au1xmmc_card_inserted() 223 return !!host->platdata->card_inserted(host->mmc); au1xmmc_card_inserted() 230 struct au1xmmc_host *host = mmc_priv(mmc); au1xmmc_card_readonly() local 232 if (host->platdata && host->platdata->card_readonly) au1xmmc_card_readonly() 233 return !!host->platdata->card_readonly(mmc); au1xmmc_card_readonly() 238 static void au1xmmc_finish_request(struct au1xmmc_host *host) au1xmmc_finish_request() argument 240 struct mmc_request *mrq = host->mrq; au1xmmc_finish_request() 242 host->mrq = NULL; au1xmmc_finish_request() 243 host->flags &= HOST_F_ACTIVE | HOST_F_DMA; au1xmmc_finish_request() 245 host->dma.len = 0; au1xmmc_finish_request() 246 host->dma.dir = 0; au1xmmc_finish_request() 248 host->pio.index = 0; au1xmmc_finish_request() 249 host->pio.offset = 0; au1xmmc_finish_request() 250 host->pio.len = 0; au1xmmc_finish_request() 252 host->status = HOST_S_IDLE; au1xmmc_finish_request() 254 mmc_request_done(host->mmc, mrq); au1xmmc_finish_request() 259 struct au1xmmc_host *host = (struct au1xmmc_host *) param; au1xmmc_tasklet_finish() local 260 au1xmmc_finish_request(host); au1xmmc_tasklet_finish() 263 static int au1xmmc_send_command(struct au1xmmc_host *host, int wait, au1xmmc_send_command() argument 303 __raw_writel(cmd->arg, HOST_CMDARG(host)); au1xmmc_send_command() 307 IRQ_OFF(host, SD_CONFIG_CR); au1xmmc_send_command() 309 __raw_writel((mmccmd | SD_CMD_GO), HOST_CMD(host)); au1xmmc_send_command() 313 while (__raw_readl(HOST_CMD(host)) & SD_CMD_GO) au1xmmc_send_command() 318 u32 status = __raw_readl(HOST_STATUS(host)); au1xmmc_send_command() 321 status = __raw_readl(HOST_STATUS(host)); au1xmmc_send_command() 324 __raw_writel(SD_STATUS_CR, HOST_STATUS(host)); au1xmmc_send_command() 326 IRQ_ON(host, SD_CONFIG_CR); au1xmmc_send_command() 332 static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) au1xmmc_data_complete() argument 334 struct mmc_request *mrq = host->mrq; au1xmmc_data_complete() 338 WARN_ON((host->status 
!= HOST_S_DATA) && (host->status != HOST_S_STOP)); au1xmmc_data_complete() 340 if (host->mrq == NULL) au1xmmc_data_complete() 346 status = __raw_readl(HOST_STATUS(host)); au1xmmc_data_complete() 349 while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB)) au1xmmc_data_complete() 350 status = __raw_readl(HOST_STATUS(host)); au1xmmc_data_complete() 353 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir); au1xmmc_data_complete() 357 if (host->flags & HOST_F_XMIT) au1xmmc_data_complete() 364 __raw_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host)); au1xmmc_data_complete() 369 if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) { au1xmmc_data_complete() 370 u32 chan = DMA_CHANNEL(host); au1xmmc_data_complete() 377 (data->blocks * data->blksz) - host->pio.len; au1xmmc_data_complete() 380 au1xmmc_finish_request(host); au1xmmc_data_complete() 385 struct au1xmmc_host *host = (struct au1xmmc_host *)param; au1xmmc_tasklet_data() local 387 u32 status = __raw_readl(HOST_STATUS(host)); au1xmmc_tasklet_data() 388 au1xmmc_data_complete(host, status); au1xmmc_tasklet_data() 393 static void au1xmmc_send_pio(struct au1xmmc_host *host) au1xmmc_send_pio() argument 401 data = host->mrq->data; au1xmmc_send_pio() 403 if (!(host->flags & HOST_F_XMIT)) au1xmmc_send_pio() 407 sg = &data->sg[host->pio.index]; au1xmmc_send_pio() 408 sg_ptr = sg_virt(sg) + host->pio.offset; au1xmmc_send_pio() 411 sg_len = data->sg[host->pio.index].length - host->pio.offset; au1xmmc_send_pio() 414 max = (sg_len > host->pio.len) ? host->pio.len : sg_len; au1xmmc_send_pio() 419 status = __raw_readl(HOST_STATUS(host)); au1xmmc_send_pio() 426 __raw_writel((unsigned long)val, HOST_TXPORT(host)); au1xmmc_send_pio() 430 host->pio.len -= count; au1xmmc_send_pio() 431 host->pio.offset += count; au1xmmc_send_pio() 434 host->pio.index++; au1xmmc_send_pio() 435 host->pio.offset = 0; au1xmmc_send_pio() 438 if (host->pio.len == 0) { au1xmmc_send_pio() 439 IRQ_OFF(host, SD_CONFIG_TH); au1xmmc_send_pio() 441 if (host->flags & HOST_F_STOP) au1xmmc_send_pio() 442 SEND_STOP(host); au1xmmc_send_pio() 444 tasklet_schedule(&host->data_task); au1xmmc_send_pio() 448 static void au1xmmc_receive_pio(struct au1xmmc_host *host) au1xmmc_receive_pio() argument 456 data = host->mrq->data; au1xmmc_receive_pio() 458 if (!(host->flags & HOST_F_RECV)) au1xmmc_receive_pio() 461 max = host->pio.len; au1xmmc_receive_pio() 463 if (host->pio.index < host->dma.len) { au1xmmc_receive_pio() 464 sg = &data->sg[host->pio.index]; au1xmmc_receive_pio() 465 sg_ptr = sg_virt(sg) + host->pio.offset; au1xmmc_receive_pio() 468 sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset; au1xmmc_receive_pio() 479 status = __raw_readl(HOST_STATUS(host)); au1xmmc_receive_pio() 485 DBG("RX CRC Error [%d + %d].\n", host->pdev->id, au1xmmc_receive_pio() 486 host->pio.len, count); au1xmmc_receive_pio() 491 DBG("RX Overrun [%d + %d]\n", host->pdev->id, au1xmmc_receive_pio() 492 host->pio.len, count); au1xmmc_receive_pio() 496 DBG("RX Underrun [%d + %d]\n", host->pdev->id, au1xmmc_receive_pio() 497 host->pio.len, count); au1xmmc_receive_pio() 501 val = __raw_readl(HOST_RXPORT(host)); au1xmmc_receive_pio() 507 host->pio.len -= count; au1xmmc_receive_pio() 508 host->pio.offset += count; au1xmmc_receive_pio() 511 host->pio.index++; au1xmmc_receive_pio() 512 host->pio.offset = 0; au1xmmc_receive_pio() 515 if (host->pio.len == 0) { au1xmmc_receive_pio() 516 /* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */ au1xmmc_receive_pio() 517 IRQ_OFF(host, SD_CONFIG_NE); 
au1xmmc_receive_pio() 519 if (host->flags & HOST_F_STOP) au1xmmc_receive_pio() 520 SEND_STOP(host); au1xmmc_receive_pio() 522 tasklet_schedule(&host->data_task); au1xmmc_receive_pio() 529 static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) au1xmmc_cmd_complete() argument 531 struct mmc_request *mrq = host->mrq; au1xmmc_cmd_complete() 536 if (!host->mrq) au1xmmc_cmd_complete() 544 r[0] = __raw_readl(host->iobase + SD_RESP3); au1xmmc_cmd_complete() 545 r[1] = __raw_readl(host->iobase + SD_RESP2); au1xmmc_cmd_complete() 546 r[2] = __raw_readl(host->iobase + SD_RESP1); au1xmmc_cmd_complete() 547 r[3] = __raw_readl(host->iobase + SD_RESP0); au1xmmc_cmd_complete() 566 cmd->resp[0] = __raw_readl(host->iobase + SD_RESP0); au1xmmc_cmd_complete() 574 trans = host->flags & (HOST_F_XMIT | HOST_F_RECV); au1xmmc_cmd_complete() 577 IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); au1xmmc_cmd_complete() 578 tasklet_schedule(&host->finish_task); au1xmmc_cmd_complete() 582 host->status = HOST_S_DATA; au1xmmc_cmd_complete() 584 if ((host->flags & (HOST_F_DMA | HOST_F_DBDMA))) { au1xmmc_cmd_complete() 585 u32 channel = DMA_CHANNEL(host); au1xmmc_cmd_complete() 589 if (host->flags & HOST_F_RECV) { au1xmmc_cmd_complete() 593 status = __raw_readl(HOST_STATUS(host)); au1xmmc_cmd_complete() 600 static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate) au1xmmc_set_clock() argument 602 unsigned int pbus = clk_get_rate(host->clk); au1xmmc_set_clock() 606 config = __raw_readl(HOST_CONFIG(host)); au1xmmc_set_clock() 611 __raw_writel(config, HOST_CONFIG(host)); au1xmmc_set_clock() 615 static int au1xmmc_prepare_data(struct au1xmmc_host *host, au1xmmc_prepare_data() argument 621 host->flags |= HOST_F_RECV; au1xmmc_prepare_data() 623 host->flags |= HOST_F_XMIT; au1xmmc_prepare_data() 625 if (host->mrq->stop) au1xmmc_prepare_data() 626 host->flags |= HOST_F_STOP; au1xmmc_prepare_data() 628 host->dma.dir = DMA_BIDIRECTIONAL; au1xmmc_prepare_data() 630 host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg, au1xmmc_prepare_data() 631 data->sg_len, host->dma.dir); au1xmmc_prepare_data() 633 if (host->dma.len == 0) au1xmmc_prepare_data() 636 __raw_writel(data->blksz - 1, HOST_BLKSIZE(host)); au1xmmc_prepare_data() 638 if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) { au1xmmc_prepare_data() 640 u32 channel = DMA_CHANNEL(host); au1xmmc_prepare_data() 644 for (i = 0; i < host->dma.len; i++) { au1xmmc_prepare_data() 651 if (i == host->dma.len - 1) au1xmmc_prepare_data() 654 if (host->flags & HOST_F_XMIT) { au1xmmc_prepare_data() 668 host->pio.index = 0; au1xmmc_prepare_data() 669 host->pio.offset = 0; au1xmmc_prepare_data() 670 host->pio.len = datalen; au1xmmc_prepare_data() 672 if (host->flags & HOST_F_XMIT) au1xmmc_prepare_data() 673 IRQ_ON(host, SD_CONFIG_TH); au1xmmc_prepare_data() 675 IRQ_ON(host, SD_CONFIG_NE); au1xmmc_prepare_data() 676 /* IRQ_ON(host, SD_CONFIG_RA | SD_CONFIG_RF); */ au1xmmc_prepare_data() 682 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, au1xmmc_prepare_data() 683 host->dma.dir); au1xmmc_prepare_data() 690 struct au1xmmc_host *host = mmc_priv(mmc); au1xmmc_request() local 694 WARN_ON(host->status != HOST_S_IDLE); au1xmmc_request() 696 host->mrq = mrq; au1xmmc_request() 697 host->status = HOST_S_CMD; au1xmmc_request() 702 au1xmmc_finish_request(host); au1xmmc_request() 707 FLUSH_FIFO(host); au1xmmc_request() 708 ret = au1xmmc_prepare_data(host, mrq->data); au1xmmc_request() 712 ret = au1xmmc_send_command(host, 0, mrq->cmd, mrq->data); au1xmmc_request() 716 
au1xmmc_finish_request(host); au1xmmc_request() 720 static void au1xmmc_reset_controller(struct au1xmmc_host *host) au1xmmc_reset_controller() argument 723 __raw_writel(SD_ENABLE_CE, HOST_ENABLE(host)); au1xmmc_reset_controller() 727 __raw_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host)); au1xmmc_reset_controller() 731 __raw_writel(~0, HOST_STATUS(host)); au1xmmc_reset_controller() 734 __raw_writel(0, HOST_BLKSIZE(host)); au1xmmc_reset_controller() 735 __raw_writel(0x001fffff, HOST_TIMEOUT(host)); au1xmmc_reset_controller() 738 __raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host)); au1xmmc_reset_controller() 741 __raw_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host)); au1xmmc_reset_controller() 745 __raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host)); au1xmmc_reset_controller() 749 __raw_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host)); au1xmmc_reset_controller() 756 struct au1xmmc_host *host = mmc_priv(mmc); au1xmmc_set_ios() local 760 au1xmmc_set_power(host, 0); au1xmmc_set_ios() 762 au1xmmc_set_power(host, 1); au1xmmc_set_ios() 765 if (ios->clock && ios->clock != host->clock) { au1xmmc_set_ios() 766 au1xmmc_set_clock(host, ios->clock); au1xmmc_set_ios() 767 host->clock = ios->clock; au1xmmc_set_ios() 770 config2 = __raw_readl(HOST_CONFIG2(host)); au1xmmc_set_ios() 783 __raw_writel(config2, HOST_CONFIG2(host)); au1xmmc_set_ios() 793 struct au1xmmc_host *host = dev_id; au1xmmc_irq() local 796 status = __raw_readl(HOST_STATUS(host)); au1xmmc_irq() 802 mmc_signal_sdio_irq(host->mmc); au1xmmc_irq() 804 if (host->mrq && (status & STATUS_TIMEOUT)) { au1xmmc_irq() 806 host->mrq->cmd->error = -ETIMEDOUT; au1xmmc_irq() 808 host->mrq->data->error = -ETIMEDOUT; au1xmmc_irq() 811 IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH); au1xmmc_irq() 813 /* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */ au1xmmc_irq() 814 tasklet_schedule(&host->finish_task); au1xmmc_irq() 819 if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE)) au1xmmc_irq() 820 au1xmmc_receive_pio(host); au1xmmc_irq() 822 au1xmmc_data_complete(host, status); au1xmmc_irq() 823 /* tasklet_schedule(&host->data_task); */ au1xmmc_irq() 828 if (host->status == HOST_S_CMD) au1xmmc_irq() 829 au1xmmc_cmd_complete(host, status); au1xmmc_irq() 831 } else if (!(host->flags & HOST_F_DMA)) { au1xmmc_irq() 832 if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT)) au1xmmc_irq() 833 au1xmmc_send_pio(host); au1xmmc_irq() 834 else if ((host->flags & HOST_F_RECV) && (status & STATUS_DATA_IN)) au1xmmc_irq() 835 au1xmmc_receive_pio(host); au1xmmc_irq() 838 DBG("Unhandled status %8.8x\n", host->pdev->id, au1xmmc_irq() 842 __raw_writel(status, HOST_STATUS(host)); au1xmmc_irq() 862 struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id; au1xmmc_dbdma_callback() local 865 if (!host->mrq) au1xmmc_dbdma_callback() 868 if (host->flags & HOST_F_STOP) au1xmmc_dbdma_callback() 869 SEND_STOP(host); au1xmmc_dbdma_callback() 871 tasklet_schedule(&host->data_task); au1xmmc_dbdma_callback() 874 static int au1xmmc_dbdma_init(struct au1xmmc_host *host) au1xmmc_dbdma_init() argument 879 res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0); au1xmmc_dbdma_init() 884 res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1); au1xmmc_dbdma_init() 892 host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid, au1xmmc_dbdma_init() 893 au1xmmc_dbdma_callback, (void *)host); au1xmmc_dbdma_init() 894 if (!host->tx_chan) { au1xmmc_dbdma_init() 895 dev_err(&host->pdev->dev, "cannot allocate TX DMA\n"); au1xmmc_dbdma_init() 899 host->rx_chan = 
au1xxx_dbdma_chan_alloc(rxid, memid, au1xmmc_dbdma_init() 900 au1xmmc_dbdma_callback, (void *)host); au1xmmc_dbdma_init() 901 if (!host->rx_chan) { au1xmmc_dbdma_init() 902 dev_err(&host->pdev->dev, "cannot allocate RX DMA\n"); au1xmmc_dbdma_init() 903 au1xxx_dbdma_chan_free(host->tx_chan); au1xmmc_dbdma_init() 907 au1xxx_dbdma_set_devwidth(host->tx_chan, 8); au1xmmc_dbdma_init() 908 au1xxx_dbdma_set_devwidth(host->rx_chan, 8); au1xmmc_dbdma_init() 910 au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT); au1xmmc_dbdma_init() 911 au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT); au1xmmc_dbdma_init() 914 host->flags |= HOST_F_DMA | HOST_F_DBDMA; au1xmmc_dbdma_init() 919 static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host) au1xmmc_dbdma_shutdown() argument 921 if (host->flags & HOST_F_DMA) { au1xmmc_dbdma_shutdown() 922 host->flags &= ~HOST_F_DMA; au1xmmc_dbdma_shutdown() 923 au1xxx_dbdma_chan_free(host->tx_chan); au1xmmc_dbdma_shutdown() 924 au1xxx_dbdma_chan_free(host->rx_chan); au1xmmc_dbdma_shutdown() 930 struct au1xmmc_host *host = mmc_priv(mmc); au1xmmc_enable_sdio_irq() local 933 IRQ_ON(host, SD_CONFIG_SI); au1xmmc_enable_sdio_irq() 935 IRQ_OFF(host, SD_CONFIG_SI); au1xmmc_enable_sdio_irq() 949 struct au1xmmc_host *host; au1xmmc_probe() local 960 host = mmc_priv(mmc); au1xmmc_probe() 961 host->mmc = mmc; au1xmmc_probe() 962 host->platdata = pdev->dev.platform_data; au1xmmc_probe() 963 host->pdev = pdev; au1xmmc_probe() 972 host->ioarea = request_mem_region(r->start, resource_size(r), au1xmmc_probe() 974 if (!host->ioarea) { au1xmmc_probe() 979 host->iobase = ioremap(r->start, 0x3c); au1xmmc_probe() 980 if (!host->iobase) { au1xmmc_probe() 990 host->irq = r->start; au1xmmc_probe() 1017 if (host->ioarea->start == AU1100_SD0_PHYS_ADDR) au1xmmc_probe() 1022 ret = request_irq(host->irq, au1xmmc_irq, iflag, DRIVER_NAME, host); au1xmmc_probe() 1028 host->clk = clk_get(&pdev->dev, ALCHEMY_PERIPH_CLK); au1xmmc_probe() 1029 if (IS_ERR(host->clk)) { au1xmmc_probe() 1031 ret = PTR_ERR(host->clk); au1xmmc_probe() 1035 ret = clk_prepare_enable(host->clk); au1xmmc_probe() 1041 host->status = HOST_S_IDLE; au1xmmc_probe() 1044 if (host->platdata && host->platdata->cd_setup) { au1xmmc_probe() 1045 ret = host->platdata->cd_setup(mmc, 1); au1xmmc_probe() 1054 if (host->platdata) au1xmmc_probe() 1055 mmc->caps &= ~(host->platdata->mask_host_caps); au1xmmc_probe() 1057 tasklet_init(&host->data_task, au1xmmc_tasklet_data, au1xmmc_probe() 1058 (unsigned long)host); au1xmmc_probe() 1060 tasklet_init(&host->finish_task, au1xmmc_tasklet_finish, au1xmmc_probe() 1061 (unsigned long)host); au1xmmc_probe() 1064 ret = au1xmmc_dbdma_init(host); au1xmmc_probe() 1070 if (host->platdata && host->platdata->led) { au1xmmc_probe() 1071 struct led_classdev *led = host->platdata->led; au1xmmc_probe() 1081 au1xmmc_reset_controller(host); au1xmmc_probe() 1085 dev_err(&pdev->dev, "cannot add mmc host\n"); au1xmmc_probe() 1089 platform_set_drvdata(pdev, host); au1xmmc_probe() 1092 " (mode=%s)\n", pdev->id, host->iobase, au1xmmc_probe() 1093 host->flags & HOST_F_DMA ? 
"dma" : "pio"); au1xmmc_probe() 1099 if (host->platdata && host->platdata->led) au1xmmc_probe() 1100 led_classdev_unregister(host->platdata->led); au1xmmc_probe() 1103 __raw_writel(0, HOST_ENABLE(host)); au1xmmc_probe() 1104 __raw_writel(0, HOST_CONFIG(host)); au1xmmc_probe() 1105 __raw_writel(0, HOST_CONFIG2(host)); au1xmmc_probe() 1108 if (host->flags & HOST_F_DBDMA) au1xmmc_probe() 1109 au1xmmc_dbdma_shutdown(host); au1xmmc_probe() 1111 tasklet_kill(&host->data_task); au1xmmc_probe() 1112 tasklet_kill(&host->finish_task); au1xmmc_probe() 1114 if (host->platdata && host->platdata->cd_setup && au1xmmc_probe() 1116 host->platdata->cd_setup(mmc, 0); au1xmmc_probe() 1118 clk_disable_unprepare(host->clk); au1xmmc_probe() 1119 clk_put(host->clk); au1xmmc_probe() 1121 free_irq(host->irq, host); au1xmmc_probe() 1123 iounmap((void *)host->iobase); au1xmmc_probe() 1125 release_resource(host->ioarea); au1xmmc_probe() 1126 kfree(host->ioarea); au1xmmc_probe() 1135 struct au1xmmc_host *host = platform_get_drvdata(pdev); au1xmmc_remove() local 1137 if (host) { au1xmmc_remove() 1138 mmc_remove_host(host->mmc); au1xmmc_remove() 1141 if (host->platdata && host->platdata->led) au1xmmc_remove() 1142 led_classdev_unregister(host->platdata->led); au1xmmc_remove() 1145 if (host->platdata && host->platdata->cd_setup && au1xmmc_remove() 1146 !(host->mmc->caps & MMC_CAP_NEEDS_POLL)) au1xmmc_remove() 1147 host->platdata->cd_setup(host->mmc, 0); au1xmmc_remove() 1149 __raw_writel(0, HOST_ENABLE(host)); au1xmmc_remove() 1150 __raw_writel(0, HOST_CONFIG(host)); au1xmmc_remove() 1151 __raw_writel(0, HOST_CONFIG2(host)); au1xmmc_remove() 1154 tasklet_kill(&host->data_task); au1xmmc_remove() 1155 tasklet_kill(&host->finish_task); au1xmmc_remove() 1157 if (host->flags & HOST_F_DBDMA) au1xmmc_remove() 1158 au1xmmc_dbdma_shutdown(host); au1xmmc_remove() 1160 au1xmmc_set_power(host, 0); au1xmmc_remove() 1162 clk_disable_unprepare(host->clk); au1xmmc_remove() 1163 clk_put(host->clk); au1xmmc_remove() 1165 free_irq(host->irq, host); au1xmmc_remove() 1166 iounmap((void *)host->iobase); au1xmmc_remove() 1167 release_resource(host->ioarea); au1xmmc_remove() 1168 kfree(host->ioarea); au1xmmc_remove() 1170 mmc_free_host(host->mmc); au1xmmc_remove() 1178 struct au1xmmc_host *host = platform_get_drvdata(pdev); au1xmmc_suspend() local 1180 __raw_writel(0, HOST_CONFIG2(host)); au1xmmc_suspend() 1181 __raw_writel(0, HOST_CONFIG(host)); au1xmmc_suspend() 1182 __raw_writel(0xffffffff, HOST_STATUS(host)); au1xmmc_suspend() 1183 __raw_writel(0, HOST_ENABLE(host)); au1xmmc_suspend() 1191 struct au1xmmc_host *host = platform_get_drvdata(pdev); au1xmmc_resume() local 1193 au1xmmc_reset_controller(host); au1xmmc_resume()
|
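
Note: the au1xmmc.c excerpt's interrupt handler reads the raw status word, defers completion work to tasklets, and acknowledges by writing the handled bits back to the status register. A minimal sketch of that shape is given below; the register offset, status bit values and my_host fields are assumptions for illustration.

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/io.h>

#define MY_STATUS	0x00		/* assumed status register offset */
#define MY_STAT_CMD	0x0001		/* assumed: command complete */
#define MY_STAT_DATA	0x0002		/* assumed: data complete */
#define MY_STAT_TIMEOUT	0x0004		/* assumed: command or data timeout */

struct my_host {
	void __iomem *iobase;
	int error;
	struct tasklet_struct finish_task;
};

static irqreturn_t my_mmc_irq(int irq, void *dev_id)
{
	struct my_host *host = dev_id;
	u32 status = __raw_readl(host->iobase + MY_STATUS);

	if (!status)
		return IRQ_NONE;		/* not our interrupt */

	if (status & MY_STAT_TIMEOUT)
		host->error = -ETIMEDOUT;	/* recorded; the tasklet reports it */

	if (status & (MY_STAT_CMD | MY_STAT_DATA | MY_STAT_TIMEOUT))
		tasklet_schedule(&host->finish_task);

	__raw_writel(status, host->iobase + MY_STATUS);	/* ack what we handled */
	return IRQ_HANDLED;
}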
H A D | mxcmmc.c | 2 * linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver 28 #include <linux/mmc/host.h> 197 static inline int is_imx31_mmc(struct mxcmci_host *host) is_imx31_mmc() argument 199 return host->devtype == IMX31_MMC; is_imx31_mmc() 202 static inline int is_mpc512x_mmc(struct mxcmci_host *host) is_mpc512x_mmc() argument 204 return host->devtype == MPC512X_MMC; is_mpc512x_mmc() 207 static inline u32 mxcmci_readl(struct mxcmci_host *host, int reg) mxcmci_readl() argument 210 return ioread32be(host->base + reg); mxcmci_readl() 212 return readl(host->base + reg); mxcmci_readl() 215 static inline void mxcmci_writel(struct mxcmci_host *host, u32 val, int reg) mxcmci_writel() argument 218 iowrite32be(val, host->base + reg); mxcmci_writel() 220 writel(val, host->base + reg); mxcmci_writel() 223 static inline u16 mxcmci_readw(struct mxcmci_host *host, int reg) mxcmci_readw() argument 226 return ioread32be(host->base + reg); mxcmci_readw() 228 return readw(host->base + reg); mxcmci_readw() 231 static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg) mxcmci_writew() argument 234 iowrite32be(val, host->base + reg); mxcmci_writew() 236 writew(val, host->base + reg); mxcmci_writew() 239 static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); 241 static void mxcmci_set_power(struct mxcmci_host *host, unsigned int vdd) mxcmci_set_power() argument 243 if (!IS_ERR(host->mmc->supply.vmmc)) { mxcmci_set_power() 244 if (host->power_mode == MMC_POWER_UP) mxcmci_set_power() 245 mmc_regulator_set_ocr(host->mmc, mxcmci_set_power() 246 host->mmc->supply.vmmc, vdd); mxcmci_set_power() 247 else if (host->power_mode == MMC_POWER_OFF) mxcmci_set_power() 248 mmc_regulator_set_ocr(host->mmc, mxcmci_set_power() 249 host->mmc->supply.vmmc, 0); mxcmci_set_power() 252 if (host->pdata && host->pdata->setpower) mxcmci_set_power() 253 host->pdata->setpower(mmc_dev(host->mmc), vdd); mxcmci_set_power() 256 static inline int mxcmci_use_dma(struct mxcmci_host *host) mxcmci_use_dma() argument 258 return host->do_dma; mxcmci_use_dma() 261 static void mxcmci_softreset(struct mxcmci_host *host) mxcmci_softreset() argument 265 dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n"); mxcmci_softreset() 268 mxcmci_writew(host, STR_STP_CLK_RESET, MMC_REG_STR_STP_CLK); mxcmci_softreset() 269 mxcmci_writew(host, STR_STP_CLK_RESET | STR_STP_CLK_START_CLK, mxcmci_softreset() 273 mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK); mxcmci_softreset() 275 mxcmci_writew(host, 0xff, MMC_REG_RES_TO); mxcmci_softreset() 301 static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) mxcmci_setup_data() argument 313 host->data = data; mxcmci_setup_data() 316 mxcmci_writew(host, nob, MMC_REG_NOB); mxcmci_setup_data() 317 mxcmci_writew(host, blksz, MMC_REG_BLK_LEN); mxcmci_setup_data() 318 host->datasize = datasize; mxcmci_setup_data() 320 if (!mxcmci_use_dma(host)) mxcmci_setup_data() 325 host->do_dma = 0; mxcmci_setup_data() 331 host->dma_dir = DMA_FROM_DEVICE; mxcmci_setup_data() 334 host->dma_dir = DMA_TO_DEVICE; mxcmci_setup_data() 340 nents = dma_map_sg(host->dma->device->dev, data->sg, mxcmci_setup_data() 341 data->sg_len, host->dma_dir); mxcmci_setup_data() 345 host->desc = dmaengine_prep_slave_sg(host->dma, mxcmci_setup_data() 349 if (!host->desc) { mxcmci_setup_data() 350 dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, mxcmci_setup_data() 351 host->dma_dir); mxcmci_setup_data() 352 host->do_dma = 0; mxcmci_setup_data() 357 
dmaengine_submit(host->desc); mxcmci_setup_data() 358 dma_async_issue_pending(host->dma); mxcmci_setup_data() 360 mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS)); mxcmci_setup_data() 365 static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat); 366 static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat); 370 struct mxcmci_host *host = data; mxcmci_dma_callback() local 373 del_timer(&host->watchdog); mxcmci_dma_callback() 375 stat = mxcmci_readl(host, MMC_REG_STATUS); mxcmci_dma_callback() 377 dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat); mxcmci_dma_callback() 379 mxcmci_data_done(host, stat); mxcmci_dma_callback() 382 static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, mxcmci_start_cmd() argument 385 u32 int_cntr = host->default_irq_mask; mxcmci_start_cmd() 388 WARN_ON(host->cmd != NULL); mxcmci_start_cmd() 389 host->cmd = cmd; mxcmci_start_cmd() 405 dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n", mxcmci_start_cmd() 413 if (mxcmci_use_dma(host)) { mxcmci_start_cmd() 414 if (host->dma_dir == DMA_FROM_DEVICE) { mxcmci_start_cmd() 415 host->desc->callback = mxcmci_dma_callback; mxcmci_start_cmd() 416 host->desc->callback_param = host; mxcmci_start_cmd() 422 spin_lock_irqsave(&host->lock, flags); mxcmci_start_cmd() 423 if (host->use_sdio) mxcmci_start_cmd() 425 mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR); mxcmci_start_cmd() 426 spin_unlock_irqrestore(&host->lock, flags); mxcmci_start_cmd() 428 mxcmci_writew(host, cmd->opcode, MMC_REG_CMD); mxcmci_start_cmd() 429 mxcmci_writel(host, cmd->arg, MMC_REG_ARG); mxcmci_start_cmd() 430 mxcmci_writew(host, cmdat, MMC_REG_CMD_DAT_CONT); mxcmci_start_cmd() 435 static void mxcmci_finish_request(struct mxcmci_host *host, mxcmci_finish_request() argument 438 u32 int_cntr = host->default_irq_mask; mxcmci_finish_request() 441 spin_lock_irqsave(&host->lock, flags); mxcmci_finish_request() 442 if (host->use_sdio) mxcmci_finish_request() 444 mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR); mxcmci_finish_request() 445 spin_unlock_irqrestore(&host->lock, flags); mxcmci_finish_request() 447 host->req = NULL; mxcmci_finish_request() 448 host->cmd = NULL; mxcmci_finish_request() 449 host->data = NULL; mxcmci_finish_request() 451 mmc_request_done(host->mmc, req); mxcmci_finish_request() 454 static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat) mxcmci_finish_data() argument 456 struct mmc_data *data = host->data; mxcmci_finish_data() 459 if (mxcmci_use_dma(host)) { mxcmci_finish_data() 460 dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, mxcmci_finish_data() 461 host->dma_dir); mxcmci_finish_data() 466 dev_dbg(mmc_dev(host->mmc), "request failed. 
status: 0x%08x\n", mxcmci_finish_data() 469 dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__); mxcmci_finish_data() 474 dev_err(mmc_dev(host->mmc), mxcmci_finish_data() 478 dev_err(mmc_dev(host->mmc), mxcmci_finish_data() 483 dev_err(mmc_dev(host->mmc), mxcmci_finish_data() 487 dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__); mxcmci_finish_data() 491 data->bytes_xfered = host->datasize; mxcmci_finish_data() 496 host->data = NULL; mxcmci_finish_data() 501 static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat) mxcmci_read_response() argument 503 struct mmc_command *cmd = host->cmd; mxcmci_read_response() 511 dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n"); mxcmci_read_response() 514 dev_dbg(mmc_dev(host->mmc), "cmd crc error\n"); mxcmci_read_response() 521 a = mxcmci_readw(host, MMC_REG_RES_FIFO); mxcmci_read_response() 522 b = mxcmci_readw(host, MMC_REG_RES_FIFO); mxcmci_read_response() 526 a = mxcmci_readw(host, MMC_REG_RES_FIFO); mxcmci_read_response() 527 b = mxcmci_readw(host, MMC_REG_RES_FIFO); mxcmci_read_response() 528 c = mxcmci_readw(host, MMC_REG_RES_FIFO); mxcmci_read_response() 534 static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask) mxcmci_poll_status() argument 540 stat = mxcmci_readl(host, MMC_REG_STATUS); mxcmci_poll_status() 544 mxcmci_softreset(host); mxcmci_poll_status() 545 mxcmci_set_clk_rate(host, host->clock); mxcmci_poll_status() 554 static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes) mxcmci_pull() argument 560 stat = mxcmci_poll_status(host, mxcmci_pull() 564 *buf++ = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS)); mxcmci_pull() 572 stat = mxcmci_poll_status(host, mxcmci_pull() 576 tmp = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS)); mxcmci_pull() 583 static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes) mxcmci_push() argument 589 stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY); mxcmci_push() 592 mxcmci_writel(host, cpu_to_le32(*buf++), MMC_REG_BUFFER_ACCESS); mxcmci_push() 600 stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY); mxcmci_push() 605 mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS); mxcmci_push() 608 stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY); mxcmci_push() 615 static int mxcmci_transfer_data(struct mxcmci_host *host) mxcmci_transfer_data() argument 617 struct mmc_data *data = host->req->data; mxcmci_transfer_data() 621 host->data = data; mxcmci_transfer_data() 622 host->datasize = 0; mxcmci_transfer_data() 626 stat = mxcmci_pull(host, sg_virt(sg), sg->length); mxcmci_transfer_data() 629 host->datasize += sg->length; mxcmci_transfer_data() 633 stat = mxcmci_push(host, sg_virt(sg), sg->length); mxcmci_transfer_data() 636 host->datasize += sg->length; mxcmci_transfer_data() 638 stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE); mxcmci_transfer_data() 647 struct mxcmci_host *host = container_of(work, struct mxcmci_host, mxcmci_datawork() local 649 int datastat = mxcmci_transfer_data(host); mxcmci_datawork() 651 mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, mxcmci_datawork() 653 mxcmci_finish_data(host, datastat); mxcmci_datawork() 655 if (host->req->stop) { mxcmci_datawork() 656 if (mxcmci_start_cmd(host, host->req->stop, 0)) { mxcmci_datawork() 657 mxcmci_finish_request(host, host->req); mxcmci_datawork() 661 mxcmci_finish_request(host, host->req); mxcmci_datawork() 665 static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat) mxcmci_data_done() argument 671 spin_lock_irqsave(&host->lock, 
flags); mxcmci_data_done() 673 if (!host->data) { mxcmci_data_done() 674 spin_unlock_irqrestore(&host->lock, flags); mxcmci_data_done() 678 if (!host->req) { mxcmci_data_done() 679 spin_unlock_irqrestore(&host->lock, flags); mxcmci_data_done() 683 req = host->req; mxcmci_data_done() 685 host->req = NULL; /* we will handle finish req below */ mxcmci_data_done() 687 data_error = mxcmci_finish_data(host, stat); mxcmci_data_done() 689 spin_unlock_irqrestore(&host->lock, flags); mxcmci_data_done() 691 mxcmci_read_response(host, stat); mxcmci_data_done() 692 host->cmd = NULL; mxcmci_data_done() 695 if (mxcmci_start_cmd(host, req->stop, 0)) { mxcmci_data_done() 696 mxcmci_finish_request(host, req); mxcmci_data_done() 700 mxcmci_finish_request(host, req); mxcmci_data_done() 704 static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat) mxcmci_cmd_done() argument 706 mxcmci_read_response(host, stat); mxcmci_cmd_done() 707 host->cmd = NULL; mxcmci_cmd_done() 709 if (!host->data && host->req) { mxcmci_cmd_done() 710 mxcmci_finish_request(host, host->req); mxcmci_cmd_done() 718 if (!mxcmci_use_dma(host) && host->data) mxcmci_cmd_done() 719 schedule_work(&host->datawork); mxcmci_cmd_done() 725 struct mxcmci_host *host = devid; mxcmci_irq() local 730 stat = mxcmci_readl(host, MMC_REG_STATUS); mxcmci_irq() 731 mxcmci_writel(host, mxcmci_irq() 736 dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat); mxcmci_irq() 738 spin_lock_irqsave(&host->lock, flags); mxcmci_irq() 739 sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio; mxcmci_irq() 740 spin_unlock_irqrestore(&host->lock, flags); mxcmci_irq() 742 if (mxcmci_use_dma(host) && (stat & (STATUS_WRITE_OP_DONE))) mxcmci_irq() 743 mxcmci_writel(host, STATUS_WRITE_OP_DONE, MMC_REG_STATUS); mxcmci_irq() 746 mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS); mxcmci_irq() 747 mmc_signal_sdio_irq(host->mmc); mxcmci_irq() 751 mxcmci_cmd_done(host, stat); mxcmci_irq() 753 if (mxcmci_use_dma(host) && (stat & STATUS_WRITE_OP_DONE)) { mxcmci_irq() 754 del_timer(&host->watchdog); mxcmci_irq() 755 mxcmci_data_done(host, stat); mxcmci_irq() 758 if (host->default_irq_mask && mxcmci_irq() 760 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); mxcmci_irq() 767 struct mxcmci_host *host = mmc_priv(mmc); mxcmci_request() local 768 unsigned int cmdat = host->cmdat; mxcmci_request() 771 WARN_ON(host->req != NULL); mxcmci_request() 773 host->req = req; mxcmci_request() 774 host->cmdat &= ~CMD_DAT_CONT_INIT; mxcmci_request() 776 if (host->dma) mxcmci_request() 777 host->do_dma = 1; mxcmci_request() 780 error = mxcmci_setup_data(host, req->data); mxcmci_request() 793 error = mxcmci_start_cmd(host, req->cmd, cmdat); mxcmci_request() 797 mxcmci_finish_request(host, req); mxcmci_request() 800 static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios) mxcmci_set_clk_rate() argument 804 unsigned int clk_in = clk_get_rate(host->clk_per); mxcmci_set_clk_rate() 827 mxcmci_writew(host, (prescaler << 4) | divider, MMC_REG_CLK_RATE); mxcmci_set_clk_rate() 829 dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n", mxcmci_set_clk_rate() 835 struct mxcmci_host *host = mmc_priv(mmc); mxcmci_setup_dma() local 836 struct dma_slave_config *config = &host->dma_slave_config; mxcmci_setup_dma() 838 config->dst_addr = host->phys_base + MMC_REG_BUFFER_ACCESS; mxcmci_setup_dma() 839 config->src_addr = host->phys_base + MMC_REG_BUFFER_ACCESS; mxcmci_setup_dma() 842 config->dst_maxburst = host->burstlen; mxcmci_setup_dma() 843 
config->src_maxburst = host->burstlen; mxcmci_setup_dma() 846 return dmaengine_slave_config(host->dma, config); mxcmci_setup_dma() 851 struct mxcmci_host *host = mmc_priv(mmc); mxcmci_set_ios() local 863 if (mxcmci_use_dma(host) && burstlen != host->burstlen) { mxcmci_set_ios() 864 host->burstlen = burstlen; mxcmci_set_ios() 867 dev_err(mmc_dev(host->mmc), mxcmci_set_ios() 869 dma_release_channel(host->dma); mxcmci_set_ios() 870 host->do_dma = 0; mxcmci_set_ios() 871 host->dma = NULL; mxcmci_set_ios() 876 host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4; mxcmci_set_ios() 878 host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4; mxcmci_set_ios() 880 if (host->power_mode != ios->power_mode) { mxcmci_set_ios() 881 host->power_mode = ios->power_mode; mxcmci_set_ios() 882 mxcmci_set_power(host, ios->vdd); mxcmci_set_ios() 885 host->cmdat |= CMD_DAT_CONT_INIT; mxcmci_set_ios() 889 mxcmci_set_clk_rate(host, ios->clock); mxcmci_set_ios() 890 mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK); mxcmci_set_ios() 892 mxcmci_writew(host, STR_STP_CLK_STOP_CLK, MMC_REG_STR_STP_CLK); mxcmci_set_ios() 895 host->clock = ios->clock; mxcmci_set_ios() 910 struct mxcmci_host *host = mmc_priv(mmc); mxcmci_get_ro() local 912 if (host->pdata && host->pdata->get_ro) mxcmci_get_ro() 913 return !!host->pdata->get_ro(mmc_dev(mmc)); mxcmci_get_ro() 924 struct mxcmci_host *host = mmc_priv(mmc); mxcmci_enable_sdio_irq() local 928 spin_lock_irqsave(&host->lock, flags); mxcmci_enable_sdio_irq() 929 host->use_sdio = enable; mxcmci_enable_sdio_irq() 930 int_cntr = mxcmci_readl(host, MMC_REG_INT_CNTR); mxcmci_enable_sdio_irq() 937 mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR); mxcmci_enable_sdio_irq() 938 spin_unlock_irqrestore(&host->lock, flags); mxcmci_enable_sdio_irq() 941 static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card) mxcmci_init_card() argument 943 struct mxcmci_host *mxcmci = mmc_priv(host); mxcmci_init_card() 953 host->caps &= ~MMC_CAP_4_BIT_DATA; mxcmci_init_card() 955 host->caps |= MMC_CAP_4_BIT_DATA; mxcmci_init_card() 960 struct mxcmci_host *host = param; filter() local 965 chan->private = &host->dma_data; filter() 973 struct mxcmci_host *host = mmc_priv(mmc); mxcmci_watchdog() local 974 struct mmc_request *req = host->req; mxcmci_watchdog() 975 unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS); mxcmci_watchdog() 977 if (host->dma_dir == DMA_FROM_DEVICE) { mxcmci_watchdog() 978 dmaengine_terminate_all(host->dma); mxcmci_watchdog() 979 dev_err(mmc_dev(host->mmc), mxcmci_watchdog() 983 dev_err(mmc_dev(host->mmc), mxcmci_watchdog() 986 mxcmci_softreset(host); mxcmci_watchdog() 991 if (host->data) mxcmci_watchdog() 992 host->data->error = -ETIMEDOUT; mxcmci_watchdog() 993 host->req = NULL; mxcmci_watchdog() 994 host->cmd = NULL; mxcmci_watchdog() 995 host->data = NULL; mxcmci_watchdog() 996 mmc_request_done(host->mmc, req); mxcmci_watchdog() 1010 struct mxcmci_host *host; mxcmci_probe() local 1027 mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); mxcmci_probe() 1031 host = mmc_priv(mmc); mxcmci_probe() 1033 host->base = devm_ioremap_resource(&pdev->dev, res); mxcmci_probe() 1034 if (IS_ERR(host->base)) { mxcmci_probe() 1035 ret = PTR_ERR(host->base); mxcmci_probe() 1039 host->phys_base = res->start; mxcmci_probe() 1060 host->devtype = id_entry->driver_data; mxcmci_probe() 1062 host->devtype = pdev->id_entry->driver_data; mxcmci_probe() 1066 if (!is_mpc512x_mmc(host)) mxcmci_probe() 1069 host->mmc = mmc; mxcmci_probe() 1070 host->pdata = pdata; mxcmci_probe() 1071 spin_lock_init(&host->lock); 
mxcmci_probe() 1091 host->default_irq_mask = mxcmci_probe() 1094 host->default_irq_mask = 0; mxcmci_probe() 1096 host->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); mxcmci_probe() 1097 if (IS_ERR(host->clk_ipg)) { mxcmci_probe() 1098 ret = PTR_ERR(host->clk_ipg); mxcmci_probe() 1102 host->clk_per = devm_clk_get(&pdev->dev, "per"); mxcmci_probe() 1103 if (IS_ERR(host->clk_per)) { mxcmci_probe() 1104 ret = PTR_ERR(host->clk_per); mxcmci_probe() 1108 clk_prepare_enable(host->clk_per); mxcmci_probe() 1109 clk_prepare_enable(host->clk_ipg); mxcmci_probe() 1111 mxcmci_softreset(host); mxcmci_probe() 1113 host->rev_no = mxcmci_readw(host, MMC_REG_REV_NO); mxcmci_probe() 1114 if (host->rev_no != 0x400) { mxcmci_probe() 1116 dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n", mxcmci_probe() 1117 host->rev_no); mxcmci_probe() 1121 mmc->f_min = clk_get_rate(host->clk_per) >> 16; mxcmci_probe() 1122 mmc->f_max = clk_get_rate(host->clk_per) >> 1; mxcmci_probe() 1125 mxcmci_writew(host, 0x2db4, MMC_REG_READ_TO); mxcmci_probe() 1127 mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR); mxcmci_probe() 1129 if (!host->pdata) { mxcmci_probe() 1130 host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx"); mxcmci_probe() 1134 host->dmareq = res->start; mxcmci_probe() 1135 host->dma_data.peripheral_type = IMX_DMATYPE_SDHC; mxcmci_probe() 1136 host->dma_data.priority = DMA_PRIO_LOW; mxcmci_probe() 1137 host->dma_data.dma_request = host->dmareq; mxcmci_probe() 1140 host->dma = dma_request_channel(mask, filter, host); mxcmci_probe() 1143 if (host->dma) mxcmci_probe() 1145 host->dma->device->dev); mxcmci_probe() 1147 dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n"); mxcmci_probe() 1149 INIT_WORK(&host->datawork, mxcmci_datawork); mxcmci_probe() 1152 dev_name(&pdev->dev), host); mxcmci_probe() 1158 if (host->pdata && host->pdata->init) { mxcmci_probe() 1159 ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq, mxcmci_probe() 1160 host->mmc); mxcmci_probe() 1165 init_timer(&host->watchdog); mxcmci_probe() 1166 host->watchdog.function = &mxcmci_watchdog; mxcmci_probe() 1167 host->watchdog.data = (unsigned long)mmc; mxcmci_probe() 1174 if (host->dma) mxcmci_probe() 1175 dma_release_channel(host->dma); mxcmci_probe() 1178 clk_disable_unprepare(host->clk_per); mxcmci_probe() 1179 clk_disable_unprepare(host->clk_ipg); mxcmci_probe() 1190 struct mxcmci_host *host = mmc_priv(mmc); mxcmci_remove() local 1194 if (host->pdata && host->pdata->exit) mxcmci_remove() 1195 host->pdata->exit(&pdev->dev, mmc); mxcmci_remove() 1197 if (host->dma) mxcmci_remove() 1198 dma_release_channel(host->dma); mxcmci_remove() 1200 clk_disable_unprepare(host->clk_per); mxcmci_remove() 1201 clk_disable_unprepare(host->clk_ipg); mxcmci_remove() 1211 struct mxcmci_host *host = mmc_priv(mmc); mxcmci_suspend() local 1213 clk_disable_unprepare(host->clk_per); mxcmci_suspend() 1214 clk_disable_unprepare(host->clk_ipg); mxcmci_suspend() 1221 struct mxcmci_host *host = mmc_priv(mmc); mxcmci_resume() local 1223 clk_prepare_enable(host->clk_per); mxcmci_resume() 1224 clk_prepare_enable(host->clk_ipg); mxcmci_resume()
|
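Note on the mxcmmc.c listing above: mxcmci_pull() and mxcmci_push() move data through the controller's 32-bit BUFFER_ACCESS register a word at a time, then use one extra word access plus memcpy() for a 1-3 byte tail. The user-space sketch below only illustrates that tail handling; fifo_read(), pull_bytes() and the sample data are hypothetical stand-ins, and the driver's status polling and endian conversion are omitted.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical stand-in for one 32-bit read of a FIFO register. */
static uint32_t fifo_read(const uint32_t **fifo)
{
	return *(*fifo)++;
}

/* Copy 'bytes' bytes out of a word-wide FIFO the way mxcmci_pull() does:
 * whole 32-bit words first, then one more word of which only the
 * remaining 1-3 bytes are kept. */
static void pull_bytes(const uint32_t **fifo, void *_buf, int bytes)
{
	uint32_t *buf = _buf;

	while (bytes > 3) {
		*buf++ = fifo_read(fifo);
		bytes -= 4;
	}
	if (bytes) {
		uint32_t tmp = fifo_read(fifo);	/* one final full word */
		memcpy(buf, &tmp, bytes);	/* keep only the tail bytes */
	}
}

int main(void)
{
	uint32_t words[3] = { 0x03020100, 0x07060504, 0x0b0a0908 };
	const uint32_t *fifo = words;
	uint32_t out_words[3] = { 0 };
	uint8_t *out = (uint8_t *)out_words;	/* word-aligned destination */
	unsigned i;

	pull_bytes(&fifo, out, 10);		/* 2 full words + 2 tail bytes */
	for (i = 0; i < 10; i++)
		printf("%02x ", out[i]);
	printf("\n");
	return 0;
}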
H A D | wbsd.c | 2 * linux/drivers/mmc/host/wbsd.c - Winbond W83L51xD SD/MMC driver 35 #include <linux/mmc/host.h> 87 static inline void wbsd_unlock_config(struct wbsd_host *host) wbsd_unlock_config() argument 89 BUG_ON(host->config == 0); wbsd_unlock_config() 91 outb(host->unlock_code, host->config); wbsd_unlock_config() 92 outb(host->unlock_code, host->config); wbsd_unlock_config() 95 static inline void wbsd_lock_config(struct wbsd_host *host) wbsd_lock_config() argument 97 BUG_ON(host->config == 0); wbsd_lock_config() 99 outb(LOCK_CODE, host->config); wbsd_lock_config() 102 static inline void wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value) wbsd_write_config() argument 104 BUG_ON(host->config == 0); wbsd_write_config() 106 outb(reg, host->config); wbsd_write_config() 107 outb(value, host->config + 1); wbsd_write_config() 110 static inline u8 wbsd_read_config(struct wbsd_host *host, u8 reg) wbsd_read_config() argument 112 BUG_ON(host->config == 0); wbsd_read_config() 114 outb(reg, host->config); wbsd_read_config() 115 return inb(host->config + 1); wbsd_read_config() 118 static inline void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value) wbsd_write_index() argument 120 outb(index, host->base + WBSD_IDXR); wbsd_write_index() 121 outb(value, host->base + WBSD_DATAR); wbsd_write_index() 124 static inline u8 wbsd_read_index(struct wbsd_host *host, u8 index) wbsd_read_index() argument 126 outb(index, host->base + WBSD_IDXR); wbsd_read_index() 127 return inb(host->base + WBSD_DATAR); wbsd_read_index() 134 static void wbsd_init_device(struct wbsd_host *host) wbsd_init_device() argument 141 setup = wbsd_read_index(host, WBSD_IDX_SETUP); wbsd_init_device() 143 wbsd_write_index(host, WBSD_IDX_SETUP, setup); wbsd_init_device() 149 wbsd_write_index(host, WBSD_IDX_SETUP, setup); wbsd_init_device() 150 host->flags &= ~WBSD_FIGNORE_DETECT; wbsd_init_device() 155 host->clk = wbsd_read_index(host, WBSD_IDX_CLK); wbsd_init_device() 160 outb(WBSD_POWER_N, host->base + WBSD_CSR); wbsd_init_device() 165 wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F); wbsd_init_device() 170 if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT) wbsd_init_device() 171 host->flags |= WBSD_FCARD_PRESENT; wbsd_init_device() 173 host->flags &= ~WBSD_FCARD_PRESENT; wbsd_init_device() 185 outb(ier, host->base + WBSD_EIR); wbsd_init_device() 190 inb(host->base + WBSD_ISR); wbsd_init_device() 193 static void wbsd_reset(struct wbsd_host *host) wbsd_reset() argument 197 pr_err("%s: Resetting chip\n", mmc_hostname(host->mmc)); wbsd_reset() 202 setup = wbsd_read_index(host, WBSD_IDX_SETUP); wbsd_reset() 204 wbsd_write_index(host, WBSD_IDX_SETUP, setup); wbsd_reset() 207 static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq) wbsd_request_end() argument 211 if (host->dma >= 0) { wbsd_request_end() 216 disable_dma(host->dma); wbsd_request_end() 217 clear_dma_ff(host->dma); wbsd_request_end() 221 * Disable DMA on host. 
wbsd_request_end() 223 wbsd_write_index(host, WBSD_IDX_DMA, 0); wbsd_request_end() 226 host->mrq = NULL; wbsd_request_end() 231 spin_unlock(&host->lock); wbsd_request_end() 232 mmc_request_done(host->mmc, mrq); wbsd_request_end() 233 spin_lock(&host->lock); wbsd_request_end() 240 static inline void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data) wbsd_init_sg() argument 245 host->cur_sg = data->sg; wbsd_init_sg() 246 host->num_sg = data->sg_len; wbsd_init_sg() 248 host->offset = 0; wbsd_init_sg() 249 host->remain = host->cur_sg->length; wbsd_init_sg() 252 static inline int wbsd_next_sg(struct wbsd_host *host) wbsd_next_sg() argument 257 host->cur_sg++; wbsd_next_sg() 258 host->num_sg--; wbsd_next_sg() 263 if (host->num_sg > 0) { wbsd_next_sg() 264 host->offset = 0; wbsd_next_sg() 265 host->remain = host->cur_sg->length; wbsd_next_sg() 268 return host->num_sg; wbsd_next_sg() 271 static inline char *wbsd_sg_to_buffer(struct wbsd_host *host) wbsd_sg_to_buffer() argument 273 return sg_virt(host->cur_sg); wbsd_sg_to_buffer() 276 static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data) wbsd_sg_to_dma() argument 280 char *dmabuf = host->dma_buffer; wbsd_sg_to_dma() 293 static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data) wbsd_dma_to_sg() argument 297 char *dmabuf = host->dma_buffer; wbsd_dma_to_sg() 314 static inline void wbsd_get_short_reply(struct wbsd_host *host, wbsd_get_short_reply() argument 320 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) { wbsd_get_short_reply() 325 cmd->resp[0] = wbsd_read_index(host, WBSD_IDX_RESP12) << 24; wbsd_get_short_reply() 326 cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP13) << 16; wbsd_get_short_reply() 327 cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP14) << 8; wbsd_get_short_reply() 328 cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP15) << 0; wbsd_get_short_reply() 329 cmd->resp[1] = wbsd_read_index(host, WBSD_IDX_RESP16) << 24; wbsd_get_short_reply() 332 static inline void wbsd_get_long_reply(struct wbsd_host *host, wbsd_get_long_reply() argument 340 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) { wbsd_get_long_reply() 347 wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24; wbsd_get_long_reply() 349 wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16; wbsd_get_long_reply() 351 wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8; wbsd_get_long_reply() 353 wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0; wbsd_get_long_reply() 357 static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd) wbsd_send_command() argument 367 host->isr = 0; wbsd_send_command() 370 * Send the command (CRC calculated by host). 
wbsd_send_command() 372 outb(cmd->opcode, host->base + WBSD_CMDR); wbsd_send_command() 374 outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR); wbsd_send_command() 382 status = wbsd_read_index(host, WBSD_IDX_STATUS); wbsd_send_command() 392 isr = host->isr; wbsd_send_command() 406 wbsd_get_long_reply(host, cmd); wbsd_send_command() 408 wbsd_get_short_reply(host, cmd); wbsd_send_command() 417 static void wbsd_empty_fifo(struct wbsd_host *host) wbsd_empty_fifo() argument 419 struct mmc_data *data = host->mrq->cmd->data; wbsd_empty_fifo() 426 if (host->num_sg == 0) wbsd_empty_fifo() 429 buffer = wbsd_sg_to_buffer(host) + host->offset; wbsd_empty_fifo() 435 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) { wbsd_empty_fifo() 448 *buffer = inb(host->base + WBSD_DFR); wbsd_empty_fifo() 450 host->offset++; wbsd_empty_fifo() 451 host->remain--; wbsd_empty_fifo() 458 if (host->remain == 0) { wbsd_empty_fifo() 462 if (!wbsd_next_sg(host)) wbsd_empty_fifo() 465 buffer = wbsd_sg_to_buffer(host); wbsd_empty_fifo() 476 tasklet_schedule(&host->fifo_tasklet); wbsd_empty_fifo() 479 static void wbsd_fill_fifo(struct wbsd_host *host) wbsd_fill_fifo() argument 481 struct mmc_data *data = host->mrq->cmd->data; wbsd_fill_fifo() 489 if (host->num_sg == 0) wbsd_fill_fifo() 492 buffer = wbsd_sg_to_buffer(host) + host->offset; wbsd_fill_fifo() 498 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) { wbsd_fill_fifo() 511 outb(*buffer, host->base + WBSD_DFR); wbsd_fill_fifo() 513 host->offset++; wbsd_fill_fifo() 514 host->remain--; wbsd_fill_fifo() 521 if (host->remain == 0) { wbsd_fill_fifo() 525 if (!wbsd_next_sg(host)) wbsd_fill_fifo() 528 buffer = wbsd_sg_to_buffer(host); wbsd_fill_fifo() 538 tasklet_schedule(&host->fifo_tasklet); wbsd_fill_fifo() 541 static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data) wbsd_prepare_data() argument 558 wbsd_write_index(host, WBSD_IDX_TAAC, 127); wbsd_prepare_data() 560 wbsd_write_index(host, WBSD_IDX_TAAC, wbsd_prepare_data() 565 wbsd_write_index(host, WBSD_IDX_NSAC, 255); wbsd_prepare_data() 567 wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks); wbsd_prepare_data() 577 if (host->bus_width == MMC_BUS_WIDTH_1) { wbsd_prepare_data() 580 wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0); wbsd_prepare_data() 581 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); wbsd_prepare_data() 582 } else if (host->bus_width == MMC_BUS_WIDTH_4) { wbsd_prepare_data() 585 wbsd_write_index(host, WBSD_IDX_PBSMSB, wbsd_prepare_data() 587 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); wbsd_prepare_data() 598 setup = wbsd_read_index(host, WBSD_IDX_SETUP); wbsd_prepare_data() 600 wbsd_write_index(host, WBSD_IDX_SETUP, setup); wbsd_prepare_data() 605 if (host->dma >= 0) { wbsd_prepare_data() 620 wbsd_sg_to_dma(host, data); wbsd_prepare_data() 626 disable_dma(host->dma); wbsd_prepare_data() 627 clear_dma_ff(host->dma); wbsd_prepare_data() 629 set_dma_mode(host->dma, DMA_MODE_READ & ~0x40); wbsd_prepare_data() 631 set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40); wbsd_prepare_data() 632 set_dma_addr(host->dma, host->dma_addr); wbsd_prepare_data() 633 set_dma_count(host->dma, size); wbsd_prepare_data() 635 enable_dma(host->dma); wbsd_prepare_data() 639 * Enable DMA on the host. 
wbsd_prepare_data() 641 wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE); wbsd_prepare_data() 647 host->firsterr = 1; wbsd_prepare_data() 652 wbsd_init_sg(host, data); wbsd_prepare_data() 657 wbsd_write_index(host, WBSD_IDX_DMA, 0); wbsd_prepare_data() 664 wbsd_write_index(host, WBSD_IDX_FIFOEN, wbsd_prepare_data() 667 wbsd_write_index(host, WBSD_IDX_FIFOEN, wbsd_prepare_data() 669 wbsd_fill_fifo(host); wbsd_prepare_data() 676 static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data) wbsd_finish_data() argument 682 WARN_ON(host->mrq == NULL); wbsd_finish_data() 688 wbsd_send_command(host, data->stop); wbsd_finish_data() 695 status = wbsd_read_index(host, WBSD_IDX_STATUS); wbsd_finish_data() 701 if (host->dma >= 0) { wbsd_finish_data() 703 * Disable DMA on the host. wbsd_finish_data() 705 wbsd_write_index(host, WBSD_IDX_DMA, 0); wbsd_finish_data() 711 disable_dma(host->dma); wbsd_finish_data() 712 clear_dma_ff(host->dma); wbsd_finish_data() 713 count = get_dma_residue(host->dma); wbsd_finish_data() 716 data->bytes_xfered = host->mrq->data->blocks * wbsd_finish_data() 717 host->mrq->data->blksz - count; wbsd_finish_data() 726 mmc_hostname(host->mmc), count); wbsd_finish_data() 736 wbsd_dma_to_sg(host, data); wbsd_finish_data() 745 wbsd_request_end(host, host->mrq); wbsd_finish_data() 756 struct wbsd_host *host = mmc_priv(mmc); wbsd_request() local 762 spin_lock_bh(&host->lock); wbsd_request() 764 BUG_ON(host->mrq != NULL); wbsd_request() 768 host->mrq = mrq; wbsd_request() 773 if (!(host->flags & WBSD_FCARD_PRESENT)) { wbsd_request() 807 mmc_hostname(host->mmc), cmd->opcode); wbsd_request() 819 wbsd_prepare_data(host, cmd->data); wbsd_request() 825 wbsd_send_command(host, cmd); wbsd_request() 836 if (host->dma == -1) wbsd_request() 837 tasklet_schedule(&host->fifo_tasklet); wbsd_request() 839 spin_unlock_bh(&host->lock); wbsd_request() 845 wbsd_request_end(host, mrq); wbsd_request() 847 spin_unlock_bh(&host->lock); wbsd_request() 852 struct wbsd_host *host = mmc_priv(mmc); wbsd_set_ios() local 855 spin_lock_bh(&host->lock); wbsd_set_ios() 862 wbsd_init_device(host); wbsd_set_ios() 877 if (clk != host->clk) { wbsd_set_ios() 878 wbsd_write_index(host, WBSD_IDX_CLK, clk); wbsd_set_ios() 879 host->clk = clk; wbsd_set_ios() 886 pwr = inb(host->base + WBSD_CSR); wbsd_set_ios() 888 outb(pwr, host->base + WBSD_CSR); wbsd_set_ios() 896 setup = wbsd_read_index(host, WBSD_IDX_SETUP); wbsd_set_ios() 900 host->flags |= WBSD_FIGNORE_DETECT; wbsd_set_ios() 909 mod_timer(&host->ignore_timer, jiffies + HZ / 100); wbsd_set_ios() 912 wbsd_write_index(host, WBSD_IDX_SETUP, setup); wbsd_set_ios() 918 host->bus_width = ios->bus_width; wbsd_set_ios() 920 spin_unlock_bh(&host->lock); wbsd_set_ios() 925 struct wbsd_host *host = mmc_priv(mmc); wbsd_get_ro() local 928 spin_lock_bh(&host->lock); wbsd_get_ro() 930 csr = inb(host->base + WBSD_CSR); wbsd_get_ro() 932 outb(csr, host->base + WBSD_CSR); wbsd_get_ro() 936 csr = inb(host->base + WBSD_CSR); wbsd_get_ro() 938 outb(csr, host->base + WBSD_CSR); wbsd_get_ro() 940 spin_unlock_bh(&host->lock); wbsd_get_ro() 963 struct wbsd_host *host = (struct wbsd_host *)data; wbsd_reset_ignore() local 965 BUG_ON(host == NULL); wbsd_reset_ignore() 969 spin_lock_bh(&host->lock); wbsd_reset_ignore() 971 host->flags &= ~WBSD_FIGNORE_DETECT; wbsd_reset_ignore() 977 tasklet_schedule(&host->card_tasklet); wbsd_reset_ignore() 979 spin_unlock_bh(&host->lock); wbsd_reset_ignore() 986 static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host) wbsd_get_data() 
argument 988 WARN_ON(!host->mrq); wbsd_get_data() 989 if (!host->mrq) wbsd_get_data() 992 WARN_ON(!host->mrq->cmd); wbsd_get_data() 993 if (!host->mrq->cmd) wbsd_get_data() 996 WARN_ON(!host->mrq->cmd->data); wbsd_get_data() 997 if (!host->mrq->cmd->data) wbsd_get_data() 1000 return host->mrq->cmd->data; wbsd_get_data() 1005 struct wbsd_host *host = (struct wbsd_host *)param; wbsd_tasklet_card() local 1009 spin_lock(&host->lock); wbsd_tasklet_card() 1011 if (host->flags & WBSD_FIGNORE_DETECT) { wbsd_tasklet_card() 1012 spin_unlock(&host->lock); wbsd_tasklet_card() 1016 csr = inb(host->base + WBSD_CSR); wbsd_tasklet_card() 1020 if (!(host->flags & WBSD_FCARD_PRESENT)) { wbsd_tasklet_card() 1022 host->flags |= WBSD_FCARD_PRESENT; wbsd_tasklet_card() 1026 } else if (host->flags & WBSD_FCARD_PRESENT) { wbsd_tasklet_card() 1028 host->flags &= ~WBSD_FCARD_PRESENT; wbsd_tasklet_card() 1030 if (host->mrq) { wbsd_tasklet_card() 1032 mmc_hostname(host->mmc)); wbsd_tasklet_card() 1033 wbsd_reset(host); wbsd_tasklet_card() 1035 host->mrq->cmd->error = -ENOMEDIUM; wbsd_tasklet_card() 1036 tasklet_schedule(&host->finish_tasklet); wbsd_tasklet_card() 1046 spin_unlock(&host->lock); wbsd_tasklet_card() 1049 mmc_detect_change(host->mmc, msecs_to_jiffies(delay)); wbsd_tasklet_card() 1054 struct wbsd_host *host = (struct wbsd_host *)param; wbsd_tasklet_fifo() local 1057 spin_lock(&host->lock); wbsd_tasklet_fifo() 1059 if (!host->mrq) wbsd_tasklet_fifo() 1062 data = wbsd_get_data(host); wbsd_tasklet_fifo() 1067 wbsd_fill_fifo(host); wbsd_tasklet_fifo() 1069 wbsd_empty_fifo(host); wbsd_tasklet_fifo() 1074 if (host->num_sg == 0) { wbsd_tasklet_fifo() 1075 wbsd_write_index(host, WBSD_IDX_FIFOEN, 0); wbsd_tasklet_fifo() 1076 tasklet_schedule(&host->finish_tasklet); wbsd_tasklet_fifo() 1080 spin_unlock(&host->lock); wbsd_tasklet_fifo() 1085 struct wbsd_host *host = (struct wbsd_host *)param; wbsd_tasklet_crc() local 1088 spin_lock(&host->lock); wbsd_tasklet_crc() 1090 if (!host->mrq) wbsd_tasklet_crc() 1093 data = wbsd_get_data(host); wbsd_tasklet_crc() 1101 tasklet_schedule(&host->finish_tasklet); wbsd_tasklet_crc() 1104 spin_unlock(&host->lock); wbsd_tasklet_crc() 1109 struct wbsd_host *host = (struct wbsd_host *)param; wbsd_tasklet_timeout() local 1112 spin_lock(&host->lock); wbsd_tasklet_timeout() 1114 if (!host->mrq) wbsd_tasklet_timeout() 1117 data = wbsd_get_data(host); wbsd_tasklet_timeout() 1125 tasklet_schedule(&host->finish_tasklet); wbsd_tasklet_timeout() 1128 spin_unlock(&host->lock); wbsd_tasklet_timeout() 1133 struct wbsd_host *host = (struct wbsd_host *)param; wbsd_tasklet_finish() local 1136 spin_lock(&host->lock); wbsd_tasklet_finish() 1138 WARN_ON(!host->mrq); wbsd_tasklet_finish() 1139 if (!host->mrq) wbsd_tasklet_finish() 1142 data = wbsd_get_data(host); wbsd_tasklet_finish() 1146 wbsd_finish_data(host, data); wbsd_tasklet_finish() 1149 spin_unlock(&host->lock); wbsd_tasklet_finish() 1158 struct wbsd_host *host = dev_id; wbsd_irq() local 1161 isr = inb(host->base + WBSD_ISR); wbsd_irq() 1169 host->isr |= isr; wbsd_irq() 1175 tasklet_schedule(&host->card_tasklet); wbsd_irq() 1177 tasklet_schedule(&host->fifo_tasklet); wbsd_irq() 1179 tasklet_hi_schedule(&host->crc_tasklet); wbsd_irq() 1181 tasklet_hi_schedule(&host->timeout_tasklet); wbsd_irq() 1183 tasklet_schedule(&host->finish_tasklet); wbsd_irq() 1201 struct wbsd_host *host; wbsd_alloc_mmc() local 1210 host = mmc_priv(mmc); wbsd_alloc_mmc() 1211 host->mmc = mmc; wbsd_alloc_mmc() 1213 host->dma = -1; wbsd_alloc_mmc() 1216 * Set host 
parameters. wbsd_alloc_mmc() 1224 spin_lock_init(&host->lock); wbsd_alloc_mmc() 1229 init_timer(&host->ignore_timer); wbsd_alloc_mmc() 1230 host->ignore_timer.data = (unsigned long)host; wbsd_alloc_mmc() 1231 host->ignore_timer.function = wbsd_reset_ignore; wbsd_alloc_mmc() 1270 struct wbsd_host *host; wbsd_free_mmc() local 1276 host = mmc_priv(mmc); wbsd_free_mmc() 1277 BUG_ON(host == NULL); wbsd_free_mmc() 1279 del_timer_sync(&host->ignore_timer); wbsd_free_mmc() 1290 static int wbsd_scan(struct wbsd_host *host) wbsd_scan() argument 1306 host->config = config_ports[i]; wbsd_scan() 1307 host->unlock_code = unlock_codes[j]; wbsd_scan() 1309 wbsd_unlock_config(host); wbsd_scan() 1317 wbsd_lock_config(host); wbsd_scan() 1321 host->chip_id = id; wbsd_scan() 1336 host->config = 0; wbsd_scan() 1337 host->unlock_code = 0; wbsd_scan() 1346 static int wbsd_request_region(struct wbsd_host *host, int base) wbsd_request_region() argument 1354 host->base = base; wbsd_request_region() 1359 static void wbsd_release_regions(struct wbsd_host *host) wbsd_release_regions() argument 1361 if (host->base) wbsd_release_regions() 1362 release_region(host->base, 8); wbsd_release_regions() 1364 host->base = 0; wbsd_release_regions() 1366 if (host->config) wbsd_release_regions() 1367 release_region(host->config, 2); wbsd_release_regions() 1369 host->config = 0; wbsd_release_regions() 1376 static void wbsd_request_dma(struct wbsd_host *host, int dma) wbsd_request_dma() argument 1388 host->dma_buffer = kmalloc(WBSD_DMA_SIZE, wbsd_request_dma() 1390 if (!host->dma_buffer) wbsd_request_dma() 1396 host->dma_addr = dma_map_single(mmc_dev(host->mmc), host->dma_buffer, wbsd_request_dma() 1402 if ((host->dma_addr & 0xffff) != 0) wbsd_request_dma() 1407 else if (host->dma_addr >= 0x1000000) wbsd_request_dma() 1410 host->dma = dma; wbsd_request_dma() 1420 dma_unmap_single(mmc_dev(host->mmc), host->dma_addr, wbsd_request_dma() 1422 host->dma_addr = 0; wbsd_request_dma() 1424 kfree(host->dma_buffer); wbsd_request_dma() 1425 host->dma_buffer = NULL; wbsd_request_dma() 1435 static void wbsd_release_dma(struct wbsd_host *host) wbsd_release_dma() argument 1437 if (host->dma_addr) { wbsd_release_dma() 1438 dma_unmap_single(mmc_dev(host->mmc), host->dma_addr, wbsd_release_dma() 1441 kfree(host->dma_buffer); wbsd_release_dma() 1442 if (host->dma >= 0) wbsd_release_dma() 1443 free_dma(host->dma); wbsd_release_dma() 1445 host->dma = -1; wbsd_release_dma() 1446 host->dma_buffer = NULL; wbsd_release_dma() 1447 host->dma_addr = 0; wbsd_release_dma() 1454 static int wbsd_request_irq(struct wbsd_host *host, int irq) wbsd_request_irq() argument 1461 tasklet_init(&host->card_tasklet, wbsd_tasklet_card, wbsd_request_irq() 1462 (unsigned long)host); wbsd_request_irq() 1463 tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo, wbsd_request_irq() 1464 (unsigned long)host); wbsd_request_irq() 1465 tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc, wbsd_request_irq() 1466 (unsigned long)host); wbsd_request_irq() 1467 tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout, wbsd_request_irq() 1468 (unsigned long)host); wbsd_request_irq() 1469 tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish, wbsd_request_irq() 1470 (unsigned long)host); wbsd_request_irq() 1475 ret = request_irq(irq, wbsd_irq, IRQF_SHARED, DRIVER_NAME, host); wbsd_request_irq() 1479 host->irq = irq; wbsd_request_irq() 1484 static void wbsd_release_irq(struct wbsd_host *host) wbsd_release_irq() argument 1486 if (!host->irq) wbsd_release_irq() 1489 free_irq(host->irq, host); 
wbsd_release_irq() 1491 host->irq = 0; wbsd_release_irq() 1493 tasklet_kill(&host->card_tasklet); wbsd_release_irq() 1494 tasklet_kill(&host->fifo_tasklet); wbsd_release_irq() 1495 tasklet_kill(&host->crc_tasklet); wbsd_release_irq() 1496 tasklet_kill(&host->timeout_tasklet); wbsd_release_irq() 1497 tasklet_kill(&host->finish_tasklet); wbsd_release_irq() 1501 * Allocate all resources for the host. 1504 static int wbsd_request_resources(struct wbsd_host *host, wbsd_request_resources() argument 1512 ret = wbsd_request_region(host, base); wbsd_request_resources() 1519 ret = wbsd_request_irq(host, irq); wbsd_request_resources() 1526 wbsd_request_dma(host, dma); wbsd_request_resources() 1532 * Release all resources for the host. 1535 static void wbsd_release_resources(struct wbsd_host *host) wbsd_release_resources() argument 1537 wbsd_release_dma(host); wbsd_release_resources() 1538 wbsd_release_irq(host); wbsd_release_resources() 1539 wbsd_release_regions(host); wbsd_release_resources() 1546 static void wbsd_chip_config(struct wbsd_host *host) wbsd_chip_config() argument 1548 wbsd_unlock_config(host); wbsd_chip_config() 1553 wbsd_write_config(host, WBSD_CONF_SWRST, 1); wbsd_chip_config() 1554 wbsd_write_config(host, WBSD_CONF_SWRST, 0); wbsd_chip_config() 1559 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); wbsd_chip_config() 1564 wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11); wbsd_chip_config() 1569 wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8); wbsd_chip_config() 1570 wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff); wbsd_chip_config() 1572 wbsd_write_config(host, WBSD_CONF_IRQ, host->irq); wbsd_chip_config() 1574 if (host->dma >= 0) wbsd_chip_config() 1575 wbsd_write_config(host, WBSD_CONF_DRQ, host->dma); wbsd_chip_config() 1580 wbsd_write_config(host, WBSD_CONF_ENABLE, 1); wbsd_chip_config() 1581 wbsd_write_config(host, WBSD_CONF_POWER, 0x20); wbsd_chip_config() 1583 wbsd_lock_config(host); wbsd_chip_config() 1590 static int wbsd_chip_validate(struct wbsd_host *host) wbsd_chip_validate() argument 1594 wbsd_unlock_config(host); wbsd_chip_validate() 1599 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); wbsd_chip_validate() 1604 base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8; wbsd_chip_validate() 1605 base |= wbsd_read_config(host, WBSD_CONF_PORT_LO); wbsd_chip_validate() 1607 irq = wbsd_read_config(host, WBSD_CONF_IRQ); wbsd_chip_validate() 1609 dma = wbsd_read_config(host, WBSD_CONF_DRQ); wbsd_chip_validate() 1611 wbsd_lock_config(host); wbsd_chip_validate() 1616 if (base != host->base) wbsd_chip_validate() 1618 if (irq != host->irq) wbsd_chip_validate() 1620 if ((dma != host->dma) && (host->dma != -1)) wbsd_chip_validate() 1630 static void wbsd_chip_poweroff(struct wbsd_host *host) wbsd_chip_poweroff() argument 1632 wbsd_unlock_config(host); wbsd_chip_poweroff() 1634 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); wbsd_chip_poweroff() 1635 wbsd_write_config(host, WBSD_CONF_ENABLE, 0); wbsd_chip_poweroff() 1637 wbsd_lock_config(host); wbsd_chip_poweroff() 1649 struct wbsd_host *host = NULL; wbsd_init() local 1658 host = mmc_priv(mmc); wbsd_init() 1663 ret = wbsd_scan(host); wbsd_init() 1676 ret = wbsd_request_resources(host, base, irq, dma); wbsd_init() 1678 wbsd_release_resources(host); wbsd_init() 1687 if ((host->config != 0) && !wbsd_chip_validate(host)) { wbsd_init() 1689 wbsd_chip_config(host); wbsd_init() 1692 wbsd_chip_config(host); wbsd_init() 1699 if (host->config) { wbsd_init() 1700 wbsd_unlock_config(host); 
wbsd_init() 1701 wbsd_write_config(host, WBSD_CONF_PME, 0xA0); wbsd_init() 1702 wbsd_lock_config(host); wbsd_init() 1713 wbsd_init_device(host); wbsd_init() 1718 if (host->chip_id != 0) wbsd_init() 1719 printk(" id %x", (int)host->chip_id); wbsd_init() 1720 printk(" at 0x%x irq %d", (int)host->base, (int)host->irq); wbsd_init() 1721 if (host->dma >= 0) wbsd_init() 1722 printk(" dma %d", (int)host->dma); wbsd_init() 1735 struct wbsd_host *host; wbsd_shutdown() local 1740 host = mmc_priv(mmc); wbsd_shutdown() 1748 wbsd_chip_poweroff(host); wbsd_shutdown() 1750 wbsd_release_resources(host); wbsd_shutdown() 1815 struct wbsd_host *host; wbsd_platform_suspend() local 1822 host = mmc_priv(mmc); wbsd_platform_suspend() 1824 wbsd_chip_poweroff(host); wbsd_platform_suspend() 1831 struct wbsd_host *host; wbsd_platform_resume() local 1838 host = mmc_priv(mmc); wbsd_platform_resume() 1840 wbsd_chip_config(host); wbsd_platform_resume() 1847 wbsd_init_device(host); wbsd_platform_resume() 1867 struct wbsd_host *host; wbsd_pnp_resume() local 1874 host = mmc_priv(mmc); wbsd_pnp_resume() 1879 if (host->config != 0) { wbsd_pnp_resume() 1880 if (!wbsd_chip_validate(host)) { wbsd_pnp_resume() 1882 wbsd_chip_config(host); wbsd_pnp_resume() 1891 wbsd_init_device(host); wbsd_pnp_resume()
|
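Note on the wbsd.c listing above: wbsd_request_dma() falls back to PIO (host->dma stays -1) unless the bounce buffer's bus address starts on a 64 KiB boundary and lies below 16 MiB, the usual legacy ISA DMA constraints. A minimal sketch of that address test follows, assuming a buffer no larger than 64 KiB; the function name and the sample addresses are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ISA_DMA_LIMIT	0x1000000u	/* legacy ISA DMA cannot address above 16 MiB */

/* Hypothetical checker mirroring the two tests in wbsd_request_dma():
 * the buffer must start on a 64 KiB boundary and below the 16 MiB mark.
 * With a buffer of at most 64 KiB this also guarantees it cannot cross
 * either a 64 KiB page or the 16 MiB limit. */
static bool isa_dma_addr_ok(uint32_t dma_addr)
{
	if (dma_addr & 0xffff)		/* not 64 KiB aligned */
		return false;
	if (dma_addr >= ISA_DMA_LIMIT)	/* starts at or above 16 MiB */
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", isa_dma_addr_ok(0x00100000));	/* 1: aligned, below 16 MiB */
	printf("%d\n", isa_dma_addr_ok(0x00100004));	/* 0: misaligned */
	printf("%d\n", isa_dma_addr_ok(0x01000000));	/* 0: at the 16 MiB limit */
	return 0;
}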
H A D | bfin_sdh.c | 18 #include <linux/mmc/host.h> 86 static void sdh_stop_clock(struct sdh_host *host) sdh_stop_clock() argument 92 static void sdh_enable_stat_irq(struct sdh_host *host, unsigned int mask) sdh_enable_stat_irq() argument 96 spin_lock_irqsave(&host->lock, flags); sdh_enable_stat_irq() 97 host->imask |= mask; sdh_enable_stat_irq() 100 spin_unlock_irqrestore(&host->lock, flags); sdh_enable_stat_irq() 103 static void sdh_disable_stat_irq(struct sdh_host *host, unsigned int mask) sdh_disable_stat_irq() argument 107 spin_lock_irqsave(&host->lock, flags); sdh_disable_stat_irq() 108 host->imask &= ~mask; sdh_disable_stat_irq() 109 bfin_write_SDH_MASK0(host->imask); sdh_disable_stat_irq() 111 spin_unlock_irqrestore(&host->lock, flags); sdh_disable_stat_irq() 114 static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) sdh_setup_data() argument 121 dev_dbg(mmc_dev(host->mmc), "%s enter flags: 0x%x\n", __func__, data->flags); sdh_setup_data() 122 host->data = data; sdh_setup_data() 144 /* the time of a host clock period in ns */ sdh_setup_data() 145 cycle_ns = 1000000000 / (host->sclk / (2 * (host->clk_div + 1))); sdh_setup_data() 152 host->dma_dir = DMA_FROM_DEVICE; sdh_setup_data() 155 host->dma_dir = DMA_TO_DEVICE; sdh_setup_data() 157 sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END)); sdh_setup_data() 158 host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); sdh_setup_data() 169 for_each_sg(data->sg, sg, host->dma_len, i) { sdh_setup_data() 170 host->sg_cpu[i].start_addr = sg_dma_address(sg); sdh_setup_data() 171 host->sg_cpu[i].cfg = dma_cfg; sdh_setup_data() 172 host->sg_cpu[i].x_count = sg_dma_len(sg) / 4; sdh_setup_data() 173 host->sg_cpu[i].x_modify = 4; sdh_setup_data() 174 dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, " sdh_setup_data() 176 i, host->sg_cpu[i].start_addr, sdh_setup_data() 177 host->sg_cpu[i].cfg, host->sg_cpu[i].x_count, sdh_setup_data() 178 host->sg_cpu[i].x_modify); sdh_setup_data() 181 flush_dcache_range((unsigned int)host->sg_cpu, sdh_setup_data() 182 (unsigned int)host->sg_cpu + sdh_setup_data() 183 host->dma_len * sizeof(struct dma_desc_array)); sdh_setup_data() 185 host->sg_cpu[host->dma_len - 1].cfg &= ~(DMAFLOW | NDSIZE); sdh_setup_data() 186 host->sg_cpu[host->dma_len - 1].cfg |= DI_EN; sdh_setup_data() 188 set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma); sdh_setup_data() 189 set_dma_x_count(host->dma_ch, 0); sdh_setup_data() 190 set_dma_x_modify(host->dma_ch, 0); sdh_setup_data() 192 set_dma_config(host->dma_ch, dma_cfg); sdh_setup_data() 196 set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0])); sdh_setup_data() 197 set_dma_x_count(host->dma_ch, length / 4); sdh_setup_data() 198 set_dma_x_modify(host->dma_ch, 4); sdh_setup_data() 200 set_dma_config(host->dma_ch, dma_cfg); sdh_setup_data() 206 dev_dbg(mmc_dev(host->mmc), "%s exit\n", __func__); sdh_setup_data() 210 static void sdh_start_cmd(struct sdh_host *host, struct mmc_command *cmd) sdh_start_cmd() argument 215 dev_dbg(mmc_dev(host->mmc), "%s enter cmd: 0x%p\n", __func__, cmd); sdh_start_cmd() 216 WARN_ON(host->cmd != NULL); sdh_start_cmd() 217 host->cmd = cmd; sdh_start_cmd() 236 sdh_enable_stat_irq(host, stat_mask); sdh_start_cmd() 244 static void sdh_finish_request(struct sdh_host *host, struct mmc_request *mrq) sdh_finish_request() argument 246 dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); sdh_finish_request() 247 host->mrq = NULL; sdh_finish_request() 248 host->cmd = NULL; 
sdh_finish_request() 249 host->data = NULL; sdh_finish_request() 250 mmc_request_done(host->mmc, mrq); sdh_finish_request() 253 static int sdh_cmd_done(struct sdh_host *host, unsigned int stat) sdh_cmd_done() argument 255 struct mmc_command *cmd = host->cmd; sdh_cmd_done() 258 dev_dbg(mmc_dev(host->mmc), "%s enter cmd: %p\n", __func__, cmd); sdh_cmd_done() 262 host->cmd = NULL; sdh_cmd_done() 277 sdh_disable_stat_irq(host, (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL)); sdh_cmd_done() 279 if (host->data && !cmd->error) { sdh_cmd_done() 280 if (host->data->flags & MMC_DATA_WRITE) { sdh_cmd_done() 281 ret = sdh_setup_data(host, host->data); sdh_cmd_done() 286 sdh_enable_stat_irq(host, DAT_END | RX_OVERRUN | TX_UNDERRUN | DAT_TIME_OUT); sdh_cmd_done() 288 sdh_finish_request(host, host->mrq); sdh_cmd_done() 293 static int sdh_data_done(struct sdh_host *host, unsigned int stat) sdh_data_done() argument 295 struct mmc_data *data = host->data; sdh_data_done() 297 dev_dbg(mmc_dev(host->mmc), "%s enter stat: 0x%x\n", __func__, stat); sdh_data_done() 301 disable_dma(host->dma_ch); sdh_data_done() 302 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, sdh_data_done() 303 host->dma_dir); sdh_data_done() 322 host->data = NULL; sdh_data_done() 323 if (host->mrq->stop) { sdh_data_done() 324 sdh_stop_clock(host); sdh_data_done() 325 sdh_start_cmd(host, host->mrq->stop); sdh_data_done() 327 sdh_finish_request(host, host->mrq); sdh_data_done() 335 struct sdh_host *host = mmc_priv(mmc); sdh_request() local 338 dev_dbg(mmc_dev(host->mmc), "%s enter, mrp:%p, cmd:%p\n", __func__, mrq, mrq->cmd); sdh_request() 339 WARN_ON(host->mrq != NULL); sdh_request() 341 spin_lock(&host->lock); sdh_request() 342 host->mrq = mrq; sdh_request() 343 host->data = mrq->data; sdh_request() 346 ret = sdh_setup_data(host, mrq->data); sdh_request() 351 sdh_start_cmd(host, mrq->cmd); sdh_request() 353 spin_unlock(&host->lock); sdh_request() 358 struct sdh_host *host; sdh_set_ios() local 364 host = mmc_priv(mmc); sdh_set_ios() 366 spin_lock(&host->lock); sdh_set_ios() 396 host->power_mode = ios->power_mode; sdh_set_ios() 434 host->clk_div = clk_div; sdh_set_ios() 437 sdh_stop_clock(host); sdh_set_ios() 448 spin_unlock(&host->lock); sdh_set_ios() 450 dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n", sdh_set_ios() 451 host->clk_div, sdh_set_ios() 452 host->clk_div ? 
get_sclk() / (2 * (host->clk_div + 1)) : 0, sdh_set_ios() 463 struct sdh_host *host = devid; sdh_dma_irq() local 465 dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04lx\n", __func__, sdh_dma_irq() 466 get_dma_curr_irqstat(host->dma_ch)); sdh_dma_irq() 467 clear_dma_irqstat(host->dma_ch); sdh_dma_irq() 475 struct sdh_host *host = devid; sdh_stat_irq() local 479 dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); sdh_stat_irq() 481 spin_lock(&host->lock); sdh_stat_irq() 485 mmc_detect_change(host->mmc, 0); sdh_stat_irq() 490 handled |= sdh_cmd_done(host, status); sdh_stat_irq() 498 handled |= sdh_data_done(host, status); sdh_stat_irq() 500 spin_unlock(&host->lock); sdh_stat_irq() 502 dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__); sdh_stat_irq() 527 struct sdh_host *host; sdh_probe() local 561 host = mmc_priv(mmc); sdh_probe() 562 host->mmc = mmc; sdh_probe() 563 host->sclk = get_sclk(); sdh_probe() 565 spin_lock_init(&host->lock); sdh_probe() 566 host->irq = drv_data->irq_int0; sdh_probe() 567 host->dma_ch = drv_data->dma_chan; sdh_probe() 569 ret = request_dma(host->dma_ch, DRIVER_NAME "DMA"); sdh_probe() 575 ret = set_dma_callback(host->dma_ch, sdh_dma_irq, host); sdh_probe() 581 host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL); sdh_probe() 582 if (host->sg_cpu == NULL) { sdh_probe() 589 ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host); sdh_probe() 607 free_irq(host->irq, host); sdh_probe() 610 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); sdh_probe() 612 free_dma(host->dma_ch); sdh_probe() 624 struct sdh_host *host = mmc_priv(mmc); sdh_remove() local 628 sdh_stop_clock(host); sdh_remove() 629 free_irq(host->irq, host); sdh_remove() 630 free_dma(host->dma_ch); sdh_remove() 631 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); sdh_remove()
|
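Note on the bfin_sdh.c listing above: the card clock is the system clock divided down as sclk / (2 * (clk_div + 1)), and sdh_setup_data() derives one card-clock period in nanoseconds from it to size the data timeout. The snippet below merely replays that arithmetic; the 100 MHz system clock and the divider value are made-up example numbers.

#include <stdio.h>

/* Same arithmetic as in the listing:
 *   card_clk = sclk / (2 * (clk_div + 1))
 *   cycle_ns = 1000000000 / card_clk
 */
int main(void)
{
	unsigned long sclk = 100000000UL;	/* example system clock: 100 MHz */
	unsigned int clk_div = 1;		/* example divider register value */
	unsigned long card_clk = sclk / (2 * (clk_div + 1));
	unsigned long cycle_ns = 1000000000UL / card_clk;

	printf("card clock: %lu Hz, period: %lu ns\n", card_clk, cycle_ns);
	/* a data timeout given in nanoseconds would typically be divided
	 * by cycle_ns to obtain a count of card-clock cycles */
	return 0;
}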
H A D | tmio_mmc_pio.c | 2 * linux/drivers/mmc/host/tmio_mmc_pio.c 37 #include <linux/mmc/host.h> 54 void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) tmio_mmc_enable_mmc_irqs() argument 56 host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ); tmio_mmc_enable_mmc_irqs() 57 sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask); tmio_mmc_enable_mmc_irqs() 60 void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i) tmio_mmc_disable_mmc_irqs() argument 62 host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ); tmio_mmc_disable_mmc_irqs() 63 sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask); tmio_mmc_disable_mmc_irqs() 66 static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i) tmio_mmc_ack_mmc_irqs() argument 68 sd_ctrl_write32(host, CTL_STATUS, ~i); tmio_mmc_ack_mmc_irqs() 71 static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) tmio_mmc_init_sg() argument 73 host->sg_len = data->sg_len; tmio_mmc_init_sg() 74 host->sg_ptr = data->sg; tmio_mmc_init_sg() 75 host->sg_orig = data->sg; tmio_mmc_init_sg() 76 host->sg_off = 0; tmio_mmc_init_sg() 79 static int tmio_mmc_next_sg(struct tmio_mmc_host *host) tmio_mmc_next_sg() argument 81 host->sg_ptr = sg_next(host->sg_ptr); tmio_mmc_next_sg() 82 host->sg_off = 0; tmio_mmc_next_sg() 83 return --host->sg_len; tmio_mmc_next_sg() 131 struct tmio_mmc_host *host = mmc_priv(mmc); tmio_mmc_enable_sdio_irq() local 133 if (enable && !host->sdio_irq_enabled) { tmio_mmc_enable_sdio_irq() 136 host->sdio_irq_enabled = true; tmio_mmc_enable_sdio_irq() 138 host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & tmio_mmc_enable_sdio_irq() 140 sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); tmio_mmc_enable_sdio_irq() 141 sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); tmio_mmc_enable_sdio_irq() 142 } else if (!enable && host->sdio_irq_enabled) { tmio_mmc_enable_sdio_irq() 143 host->sdio_irq_mask = TMIO_SDIO_MASK_ALL; tmio_mmc_enable_sdio_irq() 144 sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); tmio_mmc_enable_sdio_irq() 145 sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); tmio_mmc_enable_sdio_irq() 147 host->sdio_irq_enabled = false; tmio_mmc_enable_sdio_irq() 153 static void tmio_mmc_set_clock(struct tmio_mmc_host *host, tmio_mmc_set_clock() argument 159 for (clock = host->mmc->f_min, clk = 0x80000080; tmio_mmc_set_clock() 164 if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && tmio_mmc_set_clock() 169 if (host->set_clk_div) tmio_mmc_set_clock() 170 host->set_clk_div(host->pdev, (clk>>22) & 1); tmio_mmc_set_clock() 172 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); tmio_mmc_set_clock() 176 static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) tmio_mmc_clk_stop() argument 179 if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) { tmio_mmc_clk_stop() 180 sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); tmio_mmc_clk_stop() 184 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & tmio_mmc_clk_stop() 185 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); tmio_mmc_clk_stop() 189 static void tmio_mmc_clk_start(struct tmio_mmc_host *host) tmio_mmc_clk_start() argument 191 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | tmio_mmc_clk_start() 192 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); tmio_mmc_clk_start() 196 if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) { tmio_mmc_clk_start() 197 sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); tmio_mmc_clk_start() 202 static void tmio_mmc_reset(struct tmio_mmc_host *host) tmio_mmc_reset() argument 205 sd_ctrl_write16(host, CTL_RESET_SD, 
0x0000); tmio_mmc_reset() 207 if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) tmio_mmc_reset() 208 sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); tmio_mmc_reset() 210 sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); tmio_mmc_reset() 211 if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) tmio_mmc_reset() 212 sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); tmio_mmc_reset() 218 struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, tmio_mmc_reset_work() local 223 spin_lock_irqsave(&host->lock, flags); tmio_mmc_reset_work() 224 mrq = host->mrq; tmio_mmc_reset_work() 229 * us, so, have to check for IS_ERR(host->mrq) tmio_mmc_reset_work() 232 || time_is_after_jiffies(host->last_req_ts + tmio_mmc_reset_work() 234 spin_unlock_irqrestore(&host->lock, flags); tmio_mmc_reset_work() 238 dev_warn(&host->pdev->dev, tmio_mmc_reset_work() 242 if (host->data) tmio_mmc_reset_work() 243 host->data->error = -ETIMEDOUT; tmio_mmc_reset_work() 244 else if (host->cmd) tmio_mmc_reset_work() 245 host->cmd->error = -ETIMEDOUT; tmio_mmc_reset_work() 249 host->cmd = NULL; tmio_mmc_reset_work() 250 host->data = NULL; tmio_mmc_reset_work() 251 host->force_pio = false; tmio_mmc_reset_work() 253 spin_unlock_irqrestore(&host->lock, flags); tmio_mmc_reset_work() 255 tmio_mmc_reset(host); tmio_mmc_reset_work() 258 host->mrq = NULL; tmio_mmc_reset_work() 260 tmio_mmc_abort_dma(host); tmio_mmc_reset_work() 261 mmc_request_done(host->mmc, mrq); tmio_mmc_reset_work() 263 pm_runtime_mark_last_busy(mmc_dev(host->mmc)); tmio_mmc_reset_work() 264 pm_runtime_put_autosuspend(mmc_dev(host->mmc)); tmio_mmc_reset_work() 267 /* called with host->lock held, interrupts disabled */ tmio_mmc_finish_request() 268 static void tmio_mmc_finish_request(struct tmio_mmc_host *host) tmio_mmc_finish_request() argument 273 spin_lock_irqsave(&host->lock, flags); tmio_mmc_finish_request() 275 mrq = host->mrq; tmio_mmc_finish_request() 277 spin_unlock_irqrestore(&host->lock, flags); tmio_mmc_finish_request() 281 host->cmd = NULL; tmio_mmc_finish_request() 282 host->data = NULL; tmio_mmc_finish_request() 283 host->force_pio = false; tmio_mmc_finish_request() 285 cancel_delayed_work(&host->delayed_reset_work); tmio_mmc_finish_request() 287 host->mrq = NULL; tmio_mmc_finish_request() 288 spin_unlock_irqrestore(&host->lock, flags); tmio_mmc_finish_request() 291 tmio_mmc_abort_dma(host); tmio_mmc_finish_request() 293 mmc_request_done(host->mmc, mrq); tmio_mmc_finish_request() 295 pm_runtime_mark_last_busy(mmc_dev(host->mmc)); tmio_mmc_finish_request() 296 pm_runtime_put_autosuspend(mmc_dev(host->mmc)); tmio_mmc_finish_request() 301 struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, tmio_mmc_done_work() local 303 tmio_mmc_finish_request(host); tmio_mmc_done_work() 320 static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) tmio_mmc_start_command() argument 322 struct mmc_data *data = host->data; tmio_mmc_start_command() 328 sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); tmio_mmc_start_command() 343 host->cmd = cmd; tmio_mmc_start_command() 353 sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100); tmio_mmc_start_command() 360 if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) && tmio_mmc_start_command() 368 if (!host->native_hotplug) tmio_mmc_start_command() 370 tmio_mmc_enable_mmc_irqs(host, irq_mask); tmio_mmc_start_command() 373 sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); tmio_mmc_start_command() 374 sd_ctrl_write16(host, CTL_SD_CMD, c); tmio_mmc_start_command() 379 static void 
tmio_mmc_transfer_data(struct tmio_mmc_host *host, tmio_mmc_transfer_data() argument 383 int is_read = host->data->flags & MMC_DATA_READ; tmio_mmc_transfer_data() 390 sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); tmio_mmc_transfer_data() 392 sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); tmio_mmc_transfer_data() 408 *buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff; tmio_mmc_transfer_data() 410 sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8); tmio_mmc_transfer_data() 418 static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) tmio_mmc_pio_irq() argument 420 struct mmc_data *data = host->data; tmio_mmc_pio_irq() 426 if ((host->chan_tx || host->chan_rx) && !host->force_pio) { tmio_mmc_pio_irq() 434 sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); tmio_mmc_pio_irq() 435 buf = (unsigned short *)(sg_virt + host->sg_off); tmio_mmc_pio_irq() 437 count = host->sg_ptr->length - host->sg_off; tmio_mmc_pio_irq() 442 count, host->sg_off, data->flags); tmio_mmc_pio_irq() 445 tmio_mmc_transfer_data(host, buf, count); tmio_mmc_pio_irq() 447 host->sg_off += count; tmio_mmc_pio_irq() 449 tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt); tmio_mmc_pio_irq() 451 if (host->sg_off == host->sg_ptr->length) tmio_mmc_pio_irq() 452 tmio_mmc_next_sg(host); tmio_mmc_pio_irq() 457 static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host) tmio_mmc_check_bounce_buffer() argument 459 if (host->sg_ptr == &host->bounce_sg) { tmio_mmc_check_bounce_buffer() 461 void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); tmio_mmc_check_bounce_buffer() 462 memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); tmio_mmc_check_bounce_buffer() 463 tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr); tmio_mmc_check_bounce_buffer() 467 /* needs to be called with host->lock held */ tmio_mmc_do_data_irq() 468 void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) tmio_mmc_do_data_irq() argument 470 struct mmc_data *data = host->data; tmio_mmc_do_data_irq() 473 host->data = NULL; tmio_mmc_do_data_irq() 476 dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); tmio_mmc_do_data_irq() 499 if (host->chan_rx && !host->force_pio) tmio_mmc_do_data_irq() 500 tmio_mmc_check_bounce_buffer(host); tmio_mmc_do_data_irq() 501 dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", tmio_mmc_do_data_irq() 502 host->mrq); tmio_mmc_do_data_irq() 504 dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", tmio_mmc_do_data_irq() 505 host->mrq); tmio_mmc_do_data_irq() 510 sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); tmio_mmc_do_data_irq() 515 schedule_work(&host->done); tmio_mmc_do_data_irq() 518 static void tmio_mmc_data_irq(struct tmio_mmc_host *host) tmio_mmc_data_irq() argument 521 spin_lock(&host->lock); tmio_mmc_data_irq() 522 data = host->data; tmio_mmc_data_irq() 527 if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) { tmio_mmc_data_irq() 528 u32 status = sd_ctrl_read32(host, CTL_STATUS); tmio_mmc_data_irq() 539 if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) { tmio_mmc_data_irq() 548 tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); tmio_mmc_data_irq() 549 tasklet_schedule(&host->dma_complete); tmio_mmc_data_irq() 551 } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) { tmio_mmc_data_irq() 552 tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); tmio_mmc_data_irq() 553 tasklet_schedule(&host->dma_complete); tmio_mmc_data_irq() 555 tmio_mmc_do_data_irq(host); tmio_mmc_data_irq() 556 tmio_mmc_disable_mmc_irqs(host, 
TMIO_MASK_READOP | TMIO_MASK_WRITEOP); tmio_mmc_data_irq() 559 spin_unlock(&host->lock); tmio_mmc_data_irq() 562 static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, tmio_mmc_cmd_irq() argument 565 struct mmc_command *cmd = host->cmd; tmio_mmc_cmd_irq() 568 spin_lock(&host->lock); tmio_mmc_cmd_irq() 570 if (!host->cmd) { tmio_mmc_cmd_irq() 575 host->cmd = NULL; tmio_mmc_cmd_irq() 583 cmd->resp[i] = sd_ctrl_read32(host, addr); tmio_mmc_cmd_irq() 603 if (host->data && !cmd->error) { tmio_mmc_cmd_irq() 604 if (host->data->flags & MMC_DATA_READ) { tmio_mmc_cmd_irq() 605 if (host->force_pio || !host->chan_rx) tmio_mmc_cmd_irq() 606 tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP); tmio_mmc_cmd_irq() 608 tasklet_schedule(&host->dma_issue); tmio_mmc_cmd_irq() 610 if (host->force_pio || !host->chan_tx) tmio_mmc_cmd_irq() 611 tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP); tmio_mmc_cmd_irq() 613 tasklet_schedule(&host->dma_issue); tmio_mmc_cmd_irq() 616 schedule_work(&host->done); tmio_mmc_cmd_irq() 620 spin_unlock(&host->lock); tmio_mmc_cmd_irq() 623 static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host, tmio_mmc_card_irq_status() argument 626 *status = sd_ctrl_read32(host, CTL_STATUS); tmio_mmc_card_irq_status() 627 *ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask; tmio_mmc_card_irq_status() 633 sd_ctrl_write32(host, CTL_STATUS, TMIO_MASK_IRQ); tmio_mmc_card_irq_status() 636 static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host, __tmio_mmc_card_detect_irq() argument 639 struct mmc_host *mmc = host->mmc; __tmio_mmc_card_detect_irq() 643 tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | __tmio_mmc_card_detect_irq() 648 mmc_detect_change(host->mmc, msecs_to_jiffies(100)); __tmio_mmc_card_detect_irq() 658 struct tmio_mmc_host *host = devid; tmio_mmc_card_detect_irq() local 660 tmio_mmc_card_irq_status(host, &ireg, &status); tmio_mmc_card_detect_irq() 661 __tmio_mmc_card_detect_irq(host, ireg, status); tmio_mmc_card_detect_irq() 667 static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, __tmio_mmc_sdcard_irq() argument 672 tmio_mmc_ack_mmc_irqs(host, __tmio_mmc_sdcard_irq() 675 tmio_mmc_cmd_irq(host, status); __tmio_mmc_sdcard_irq() 681 tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); __tmio_mmc_sdcard_irq() 682 tmio_mmc_pio_irq(host); __tmio_mmc_sdcard_irq() 688 tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND); __tmio_mmc_sdcard_irq() 689 tmio_mmc_data_irq(host); __tmio_mmc_sdcard_irq() 699 struct tmio_mmc_host *host = devid; tmio_mmc_sdcard_irq() local 701 tmio_mmc_card_irq_status(host, &ireg, &status); tmio_mmc_sdcard_irq() 702 __tmio_mmc_sdcard_irq(host, ireg, status); tmio_mmc_sdcard_irq() 710 struct tmio_mmc_host *host = devid; tmio_mmc_sdio_irq() local 711 struct mmc_host *mmc = host->mmc; tmio_mmc_sdio_irq() 712 struct tmio_mmc_data *pdata = host->pdata; tmio_mmc_sdio_irq() 719 status = sd_ctrl_read16(host, CTL_SDIO_STATUS); tmio_mmc_sdio_irq() 720 ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask; tmio_mmc_sdio_irq() 726 sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status); tmio_mmc_sdio_irq() 737 struct tmio_mmc_host *host = devid; tmio_mmc_irq() local 742 tmio_mmc_card_irq_status(host, &ireg, &status); tmio_mmc_irq() 743 if (__tmio_mmc_card_detect_irq(host, ireg, status)) tmio_mmc_irq() 745 if (__tmio_mmc_sdcard_irq(host, ireg, status)) tmio_mmc_irq() 754 static int tmio_mmc_start_data(struct tmio_mmc_host *host, tmio_mmc_start_data() argument 757 struct tmio_mmc_data *pdata = host->pdata; tmio_mmc_start_data() 763 if 
(host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { tmio_mmc_start_data() 768 mmc_hostname(host->mmc), data->blksz); tmio_mmc_start_data() 773 tmio_mmc_init_sg(host, data); tmio_mmc_start_data() 774 host->data = data; tmio_mmc_start_data() 777 sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); tmio_mmc_start_data() 778 sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); tmio_mmc_start_data() 780 tmio_mmc_start_dma(host, data); tmio_mmc_start_data() 788 struct tmio_mmc_host *host = mmc_priv(mmc); tmio_mmc_request() local 792 spin_lock_irqsave(&host->lock, flags); tmio_mmc_request() 794 if (host->mrq) { tmio_mmc_request() 796 if (IS_ERR(host->mrq)) { tmio_mmc_request() 797 spin_unlock_irqrestore(&host->lock, flags); tmio_mmc_request() 804 host->last_req_ts = jiffies; tmio_mmc_request() 806 host->mrq = mrq; tmio_mmc_request() 808 spin_unlock_irqrestore(&host->lock, flags); tmio_mmc_request() 813 ret = tmio_mmc_start_data(host, mrq->data); tmio_mmc_request() 818 ret = tmio_mmc_start_command(host, mrq->cmd); tmio_mmc_request() 820 schedule_delayed_work(&host->delayed_reset_work, tmio_mmc_request() 826 host->force_pio = false; tmio_mmc_request() 827 host->mrq = NULL; tmio_mmc_request() 835 static int tmio_mmc_clk_update(struct tmio_mmc_host *host) tmio_mmc_clk_update() argument 837 struct mmc_host *mmc = host->mmc; tmio_mmc_clk_update() 840 if (!host->clk_enable) tmio_mmc_clk_update() 843 ret = host->clk_enable(host->pdev, &mmc->f_max); tmio_mmc_clk_update() 850 static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd) tmio_mmc_power_on() argument 852 struct mmc_host *mmc = host->mmc; tmio_mmc_power_on() 857 if (host->set_pwr) tmio_mmc_power_on() 858 host->set_pwr(host->pdev, 1); tmio_mmc_power_on() 880 dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n", tmio_mmc_power_on() 884 static void tmio_mmc_power_off(struct tmio_mmc_host *host) tmio_mmc_power_off() argument 886 struct mmc_host *mmc = host->mmc; tmio_mmc_power_off() 894 if (host->set_pwr) tmio_mmc_power_off() 895 host->set_pwr(host->pdev, 0); tmio_mmc_power_off() 898 static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host, tmio_mmc_set_bus_width() argument 903 sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); tmio_mmc_set_bus_width() 906 sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); tmio_mmc_set_bus_width() 919 struct tmio_mmc_host *host = mmc_priv(mmc); tmio_mmc_set_ios() local 920 struct device *dev = &host->pdev->dev; tmio_mmc_set_ios() 925 mutex_lock(&host->ios_lock); tmio_mmc_set_ios() 927 spin_lock_irqsave(&host->lock, flags); tmio_mmc_set_ios() 928 if (host->mrq) { tmio_mmc_set_ios() 929 if (IS_ERR(host->mrq)) { tmio_mmc_set_ios() 934 host->mrq = ERR_PTR(-EINTR); tmio_mmc_set_ios() 939 host->mrq->cmd->opcode, host->last_req_ts, jiffies); tmio_mmc_set_ios() 941 spin_unlock_irqrestore(&host->lock, flags); tmio_mmc_set_ios() 943 mutex_unlock(&host->ios_lock); tmio_mmc_set_ios() 947 host->mrq = ERR_PTR(-EBUSY); tmio_mmc_set_ios() 949 spin_unlock_irqrestore(&host->lock, flags); tmio_mmc_set_ios() 953 tmio_mmc_power_off(host); tmio_mmc_set_ios() 954 tmio_mmc_clk_stop(host); tmio_mmc_set_ios() 957 tmio_mmc_set_clock(host, ios->clock); tmio_mmc_set_ios() 958 tmio_mmc_power_on(host, ios->vdd); tmio_mmc_set_ios() 959 tmio_mmc_clk_start(host); tmio_mmc_set_ios() 960 tmio_mmc_set_bus_width(host, ios->bus_width); tmio_mmc_set_ios() 963 tmio_mmc_set_clock(host, ios->clock); tmio_mmc_set_ios() 964 tmio_mmc_clk_start(host); tmio_mmc_set_ios() 965 tmio_mmc_set_bus_width(host, ios->bus_width); 
tmio_mmc_set_ios() 971 if (PTR_ERR(host->mrq) == -EINTR) tmio_mmc_set_ios() 972 dev_dbg(&host->pdev->dev, tmio_mmc_set_ios() 976 host->mrq = NULL; tmio_mmc_set_ios() 978 host->clk_cache = ios->clock; tmio_mmc_set_ios() 980 mutex_unlock(&host->ios_lock); tmio_mmc_set_ios() 988 struct tmio_mmc_host *host = mmc_priv(mmc); tmio_mmc_get_ro() local 989 struct tmio_mmc_data *pdata = host->pdata; tmio_mmc_get_ro() 996 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); tmio_mmc_get_ro() 1006 struct tmio_mmc_host *host = mmc_priv(card->host); tmio_multi_io_quirk() local 1008 if (host->multi_io_quirk) tmio_multi_io_quirk() 1009 return host->multi_io_quirk(card, direction, blk_size); tmio_multi_io_quirk() 1023 static int tmio_mmc_init_ocr(struct tmio_mmc_host *host) tmio_mmc_init_ocr() argument 1025 struct tmio_mmc_data *pdata = host->pdata; tmio_mmc_init_ocr() 1026 struct mmc_host *mmc = host->mmc; tmio_mmc_init_ocr() 1058 struct tmio_mmc_host *host; tmio_mmc_host_alloc() local 1065 host = mmc_priv(mmc); tmio_mmc_host_alloc() 1066 host->mmc = mmc; tmio_mmc_host_alloc() 1067 host->pdev = pdev; tmio_mmc_host_alloc() 1069 return host; tmio_mmc_host_alloc() 1073 void tmio_mmc_host_free(struct tmio_mmc_host *host) tmio_mmc_host_free() argument 1075 mmc_free_host(host->mmc); tmio_mmc_host_free() 1216 void tmio_mmc_host_remove(struct tmio_mmc_host *host) tmio_mmc_host_remove() argument 1218 struct platform_device *pdev = host->pdev; tmio_mmc_host_remove() 1219 struct mmc_host *mmc = host->mmc; tmio_mmc_host_remove() 1221 if (!host->native_hotplug) tmio_mmc_host_remove() 1227 cancel_work_sync(&host->done); tmio_mmc_host_remove() 1228 cancel_delayed_work_sync(&host->delayed_reset_work); tmio_mmc_host_remove() 1229 tmio_mmc_release_dma(host); tmio_mmc_host_remove() 1234 iounmap(host->ctl); tmio_mmc_host_remove() 1242 struct tmio_mmc_host *host = mmc_priv(mmc); tmio_mmc_host_runtime_suspend() local 1244 tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); tmio_mmc_host_runtime_suspend() 1246 if (host->clk_cache) tmio_mmc_host_runtime_suspend() 1247 tmio_mmc_clk_stop(host); tmio_mmc_host_runtime_suspend() 1249 if (host->clk_disable) tmio_mmc_host_runtime_suspend() 1250 host->clk_disable(host->pdev); tmio_mmc_host_runtime_suspend() 1259 struct tmio_mmc_host *host = mmc_priv(mmc); tmio_mmc_host_runtime_resume() local 1261 tmio_mmc_reset(host); tmio_mmc_host_runtime_resume() 1262 tmio_mmc_clk_update(host); tmio_mmc_host_runtime_resume() 1264 if (host->clk_cache) { tmio_mmc_host_runtime_resume() 1265 tmio_mmc_set_clock(host, host->clk_cache); tmio_mmc_host_runtime_resume() 1266 tmio_mmc_clk_start(host); tmio_mmc_host_runtime_resume() 1269 tmio_mmc_enable_dma(host, true); tmio_mmc_host_runtime_resume()
H A D | sh_mmcif.c | 53 #include <linux/mmc/host.h> 260 static inline void sh_mmcif_bitset(struct sh_mmcif_host *host, sh_mmcif_bitset() argument 263 writel(val | readl(host->addr + reg), host->addr + reg); sh_mmcif_bitset() 266 static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host, sh_mmcif_bitclr() argument 269 writel(~val & readl(host->addr + reg), host->addr + reg); sh_mmcif_bitclr() 274 struct sh_mmcif_host *host = arg; mmcif_dma_complete() local 275 struct mmc_request *mrq = host->mrq; mmcif_dma_complete() 277 dev_dbg(&host->pd->dev, "Command completed\n"); mmcif_dma_complete() 280 dev_name(&host->pd->dev))) mmcif_dma_complete() 283 complete(&host->dma_complete); mmcif_dma_complete() 286 static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) sh_mmcif_start_dma_rx() argument 288 struct mmc_data *data = host->mrq->data; sh_mmcif_start_dma_rx() 291 struct dma_chan *chan = host->chan_rx; sh_mmcif_start_dma_rx() 298 host->dma_active = true; sh_mmcif_start_dma_rx() 305 desc->callback_param = host; sh_mmcif_start_dma_rx() 307 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN); sh_mmcif_start_dma_rx() 310 dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", sh_mmcif_start_dma_rx() 317 host->chan_rx = NULL; sh_mmcif_start_dma_rx() 318 host->dma_active = false; sh_mmcif_start_dma_rx() 321 chan = host->chan_tx; sh_mmcif_start_dma_rx() 323 host->chan_tx = NULL; sh_mmcif_start_dma_rx() 326 dev_warn(&host->pd->dev, sh_mmcif_start_dma_rx() 328 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); sh_mmcif_start_dma_rx() 331 dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, sh_mmcif_start_dma_rx() 335 static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) sh_mmcif_start_dma_tx() argument 337 struct mmc_data *data = host->mrq->data; sh_mmcif_start_dma_tx() 340 struct dma_chan *chan = host->chan_tx; sh_mmcif_start_dma_tx() 347 host->dma_active = true; sh_mmcif_start_dma_tx() 354 desc->callback_param = host; sh_mmcif_start_dma_tx() 356 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN); sh_mmcif_start_dma_tx() 359 dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", sh_mmcif_start_dma_tx() 366 host->chan_tx = NULL; sh_mmcif_start_dma_tx() 367 host->dma_active = false; sh_mmcif_start_dma_tx() 370 chan = host->chan_rx; sh_mmcif_start_dma_tx() 372 host->chan_rx = NULL; sh_mmcif_start_dma_tx() 375 dev_warn(&host->pd->dev, sh_mmcif_start_dma_tx() 377 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); sh_mmcif_start_dma_tx() 380 dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__, sh_mmcif_start_dma_tx() 385 sh_mmcif_request_dma_one(struct sh_mmcif_host *host, sh_mmcif_request_dma_one() argument 405 slave_data, &host->pd->dev, sh_mmcif_request_dma_one() 408 dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__, sh_mmcif_request_dma_one() 414 res = platform_get_resource(host->pd, IORESOURCE_MEM, 0); sh_mmcif_request_dma_one() 435 static void sh_mmcif_request_dma(struct sh_mmcif_host *host, sh_mmcif_request_dma() argument 438 host->dma_active = false; sh_mmcif_request_dma() 443 } else if (!host->pd->dev.of_node) { sh_mmcif_request_dma() 448 host->chan_tx = sh_mmcif_request_dma_one(host, pdata, DMA_MEM_TO_DEV); sh_mmcif_request_dma() 449 if (!host->chan_tx) sh_mmcif_request_dma() 452 host->chan_rx = sh_mmcif_request_dma_one(host, pdata, DMA_DEV_TO_MEM); sh_mmcif_request_dma() 453 if (!host->chan_rx) { sh_mmcif_request_dma() 454 dma_release_channel(host->chan_tx); 
sh_mmcif_request_dma() 455 host->chan_tx = NULL; sh_mmcif_request_dma() 459 static void sh_mmcif_release_dma(struct sh_mmcif_host *host) sh_mmcif_release_dma() argument 461 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); sh_mmcif_release_dma() 463 if (host->chan_tx) { sh_mmcif_release_dma() 464 struct dma_chan *chan = host->chan_tx; sh_mmcif_release_dma() 465 host->chan_tx = NULL; sh_mmcif_release_dma() 468 if (host->chan_rx) { sh_mmcif_release_dma() 469 struct dma_chan *chan = host->chan_rx; sh_mmcif_release_dma() 470 host->chan_rx = NULL; sh_mmcif_release_dma() 474 host->dma_active = false; sh_mmcif_release_dma() 477 static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) sh_mmcif_clock_control() argument 479 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; sh_mmcif_clock_control() 482 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); sh_mmcif_clock_control() 483 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR); sh_mmcif_clock_control() 487 if (sup_pclk && clk == host->clk) sh_mmcif_clock_control() 488 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK); sh_mmcif_clock_control() 490 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & sh_mmcif_clock_control() 491 ((fls(DIV_ROUND_UP(host->clk, sh_mmcif_clock_control() 494 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); sh_mmcif_clock_control() 497 static void sh_mmcif_sync_reset(struct sh_mmcif_host *host) sh_mmcif_sync_reset() argument 501 tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL); sh_mmcif_sync_reset() 503 sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON); sh_mmcif_sync_reset() 504 sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF); sh_mmcif_sync_reset() 505 if (host->ccs_enable) sh_mmcif_sync_reset() 507 if (host->clk_ctrl2_enable) sh_mmcif_sync_reset() 508 sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000); sh_mmcif_sync_reset() 509 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp | sh_mmcif_sync_reset() 512 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP); sh_mmcif_sync_reset() 515 static int sh_mmcif_error_manage(struct sh_mmcif_host *host) sh_mmcif_error_manage() argument 520 host->sd_error = false; sh_mmcif_error_manage() 522 state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1); sh_mmcif_error_manage() 523 state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2); sh_mmcif_error_manage() 524 dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1); sh_mmcif_error_manage() 525 dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2); sh_mmcif_error_manage() 528 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); sh_mmcif_error_manage() 529 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK); sh_mmcif_error_manage() 531 if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) sh_mmcif_error_manage() 537 dev_err(&host->pd->dev, sh_mmcif_error_manage() 541 sh_mmcif_sync_reset(host); sh_mmcif_error_manage() 542 dev_dbg(&host->pd->dev, "Forced end of command sequence\n"); sh_mmcif_error_manage() 547 dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n", sh_mmcif_error_manage() 548 host->state, host->wait_for); sh_mmcif_error_manage() 551 dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n", sh_mmcif_error_manage() 552 host->state, host->wait_for); sh_mmcif_error_manage() 555 dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n", sh_mmcif_error_manage() 556 host->state, host->wait_for); sh_mmcif_error_manage() 562 static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p) 
sh_mmcif_next_block() argument 564 struct mmc_data *data = host->mrq->data; sh_mmcif_next_block() 566 host->sg_blkidx += host->blocksize; sh_mmcif_next_block() 568 /* data->sg->length must be a multiple of host->blocksize? */ sh_mmcif_next_block() 569 BUG_ON(host->sg_blkidx > data->sg->length); sh_mmcif_next_block() 571 if (host->sg_blkidx == data->sg->length) { sh_mmcif_next_block() 572 host->sg_blkidx = 0; sh_mmcif_next_block() 573 if (++host->sg_idx < data->sg_len) sh_mmcif_next_block() 574 host->pio_ptr = sg_virt(++data->sg); sh_mmcif_next_block() 576 host->pio_ptr = p; sh_mmcif_next_block() 579 return host->sg_idx != data->sg_len; sh_mmcif_next_block() 582 static void sh_mmcif_single_read(struct sh_mmcif_host *host, sh_mmcif_single_read() argument 585 host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & sh_mmcif_single_read() 588 host->wait_for = MMCIF_WAIT_FOR_READ; sh_mmcif_single_read() 591 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); sh_mmcif_single_read() 594 static bool sh_mmcif_read_block(struct sh_mmcif_host *host) sh_mmcif_read_block() argument 596 struct mmc_data *data = host->mrq->data; sh_mmcif_read_block() 600 if (host->sd_error) { sh_mmcif_read_block() 601 data->error = sh_mmcif_error_manage(host); sh_mmcif_read_block() 602 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); sh_mmcif_read_block() 606 for (i = 0; i < host->blocksize / 4; i++) sh_mmcif_read_block() 607 *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); sh_mmcif_read_block() 610 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); sh_mmcif_read_block() 611 host->wait_for = MMCIF_WAIT_FOR_READ_END; sh_mmcif_read_block() 616 static void sh_mmcif_multi_read(struct sh_mmcif_host *host, sh_mmcif_multi_read() argument 624 host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & sh_mmcif_multi_read() 627 host->wait_for = MMCIF_WAIT_FOR_MREAD; sh_mmcif_multi_read() 628 host->sg_idx = 0; sh_mmcif_multi_read() 629 host->sg_blkidx = 0; sh_mmcif_multi_read() 630 host->pio_ptr = sg_virt(data->sg); sh_mmcif_multi_read() 632 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); sh_mmcif_multi_read() 635 static bool sh_mmcif_mread_block(struct sh_mmcif_host *host) sh_mmcif_mread_block() argument 637 struct mmc_data *data = host->mrq->data; sh_mmcif_mread_block() 638 u32 *p = host->pio_ptr; sh_mmcif_mread_block() 641 if (host->sd_error) { sh_mmcif_mread_block() 642 data->error = sh_mmcif_error_manage(host); sh_mmcif_mread_block() 643 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); sh_mmcif_mread_block() 649 for (i = 0; i < host->blocksize / 4; i++) sh_mmcif_mread_block() 650 *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); sh_mmcif_mread_block() 652 if (!sh_mmcif_next_block(host, p)) sh_mmcif_mread_block() 655 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); sh_mmcif_mread_block() 660 static void sh_mmcif_single_write(struct sh_mmcif_host *host, sh_mmcif_single_write() argument 663 host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & sh_mmcif_single_write() 666 host->wait_for = MMCIF_WAIT_FOR_WRITE; sh_mmcif_single_write() 669 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); sh_mmcif_single_write() 672 static bool sh_mmcif_write_block(struct sh_mmcif_host *host) sh_mmcif_write_block() argument 674 struct mmc_data *data = host->mrq->data; sh_mmcif_write_block() 678 if (host->sd_error) { sh_mmcif_write_block() 679 data->error = sh_mmcif_error_manage(host); sh_mmcif_write_block() 680 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, 
data->error); sh_mmcif_write_block() 684 for (i = 0; i < host->blocksize / 4; i++) sh_mmcif_write_block() 685 sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); sh_mmcif_write_block() 688 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); sh_mmcif_write_block() 689 host->wait_for = MMCIF_WAIT_FOR_WRITE_END; sh_mmcif_write_block() 694 static void sh_mmcif_multi_write(struct sh_mmcif_host *host, sh_mmcif_multi_write() argument 702 host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & sh_mmcif_multi_write() 705 host->wait_for = MMCIF_WAIT_FOR_MWRITE; sh_mmcif_multi_write() 706 host->sg_idx = 0; sh_mmcif_multi_write() 707 host->sg_blkidx = 0; sh_mmcif_multi_write() 708 host->pio_ptr = sg_virt(data->sg); sh_mmcif_multi_write() 710 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); sh_mmcif_multi_write() 713 static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) sh_mmcif_mwrite_block() argument 715 struct mmc_data *data = host->mrq->data; sh_mmcif_mwrite_block() 716 u32 *p = host->pio_ptr; sh_mmcif_mwrite_block() 719 if (host->sd_error) { sh_mmcif_mwrite_block() 720 data->error = sh_mmcif_error_manage(host); sh_mmcif_mwrite_block() 721 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); sh_mmcif_mwrite_block() 727 for (i = 0; i < host->blocksize / 4; i++) sh_mmcif_mwrite_block() 728 sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); sh_mmcif_mwrite_block() 730 if (!sh_mmcif_next_block(host, p)) sh_mmcif_mwrite_block() 733 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); sh_mmcif_mwrite_block() 738 static void sh_mmcif_get_response(struct sh_mmcif_host *host, sh_mmcif_get_response() argument 742 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3); sh_mmcif_get_response() 743 cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2); sh_mmcif_get_response() 744 cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1); sh_mmcif_get_response() 745 cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0); sh_mmcif_get_response() 747 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0); sh_mmcif_get_response() 750 static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host, sh_mmcif_get_cmd12response() argument 753 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12); sh_mmcif_get_cmd12response() 756 static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, sh_mmcif_set_cmd() argument 778 dev_err(&host->pd->dev, "Unsupported response type.\n"); sh_mmcif_set_cmd() 795 switch (host->bus_width) { sh_mmcif_set_cmd() 806 dev_err(&host->pd->dev, "Unsupported bus width.\n"); sh_mmcif_set_cmd() 809 switch (host->timing) { sh_mmcif_set_cmd() 812 * MMC core will only set this timing, if the host sh_mmcif_set_cmd() 828 sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET, sh_mmcif_set_cmd() 846 static int sh_mmcif_data_trans(struct sh_mmcif_host *host, sh_mmcif_data_trans() argument 851 sh_mmcif_multi_read(host, mrq); sh_mmcif_data_trans() 854 sh_mmcif_multi_write(host, mrq); sh_mmcif_data_trans() 857 sh_mmcif_single_write(host, mrq); sh_mmcif_data_trans() 861 sh_mmcif_single_read(host, mrq); sh_mmcif_data_trans() 864 dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc); sh_mmcif_data_trans() 869 static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, sh_mmcif_start_cmd() argument 892 if (host->ccs_enable) sh_mmcif_start_cmd() 896 sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0); sh_mmcif_start_cmd() 897 sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, sh_mmcif_start_cmd() 900 opc = sh_mmcif_set_cmd(host, mrq); sh_mmcif_start_cmd() 902 if 
(host->ccs_enable) sh_mmcif_start_cmd() 903 sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0); sh_mmcif_start_cmd() 905 sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS); sh_mmcif_start_cmd() 906 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask); sh_mmcif_start_cmd() 908 sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg); sh_mmcif_start_cmd() 910 spin_lock_irqsave(&host->lock, flags); sh_mmcif_start_cmd() 911 sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc); sh_mmcif_start_cmd() 913 host->wait_for = MMCIF_WAIT_FOR_CMD; sh_mmcif_start_cmd() 914 schedule_delayed_work(&host->timeout_work, host->timeout); sh_mmcif_start_cmd() 915 spin_unlock_irqrestore(&host->lock, flags); sh_mmcif_start_cmd() 918 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, sh_mmcif_stop_cmd() argument 923 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); sh_mmcif_stop_cmd() 926 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); sh_mmcif_stop_cmd() 929 dev_err(&host->pd->dev, "unsupported stop cmd\n"); sh_mmcif_stop_cmd() 930 mrq->stop->error = sh_mmcif_error_manage(host); sh_mmcif_stop_cmd() 934 host->wait_for = MMCIF_WAIT_FOR_STOP; sh_mmcif_stop_cmd() 939 struct sh_mmcif_host *host = mmc_priv(mmc); sh_mmcif_request() local 942 spin_lock_irqsave(&host->lock, flags); sh_mmcif_request() 943 if (host->state != STATE_IDLE) { sh_mmcif_request() 944 dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state); sh_mmcif_request() 945 spin_unlock_irqrestore(&host->lock, flags); sh_mmcif_request() 951 host->state = STATE_REQUEST; sh_mmcif_request() 952 spin_unlock_irqrestore(&host->lock, flags); sh_mmcif_request() 962 host->state = STATE_IDLE; sh_mmcif_request() 970 host->mrq = mrq; sh_mmcif_request() 972 sh_mmcif_start_cmd(host, mrq); sh_mmcif_request() 975 static int sh_mmcif_clk_update(struct sh_mmcif_host *host) sh_mmcif_clk_update() argument 977 int ret = clk_prepare_enable(host->hclk); sh_mmcif_clk_update() 980 host->clk = clk_get_rate(host->hclk); sh_mmcif_clk_update() 981 host->mmc->f_max = host->clk / 2; sh_mmcif_clk_update() 982 host->mmc->f_min = host->clk / 512; sh_mmcif_clk_update() 988 static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios) sh_mmcif_set_power() argument 990 struct mmc_host *mmc = host->mmc; sh_mmcif_set_power() 1000 struct sh_mmcif_host *host = mmc_priv(mmc); sh_mmcif_set_ios() local 1003 spin_lock_irqsave(&host->lock, flags); sh_mmcif_set_ios() 1004 if (host->state != STATE_IDLE) { sh_mmcif_set_ios() 1005 dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state); sh_mmcif_set_ios() 1006 spin_unlock_irqrestore(&host->lock, flags); sh_mmcif_set_ios() 1010 host->state = STATE_IOS; sh_mmcif_set_ios() 1011 spin_unlock_irqrestore(&host->lock, flags); sh_mmcif_set_ios() 1014 if (!host->card_present) { sh_mmcif_set_ios() 1016 sh_mmcif_request_dma(host, host->pd->dev.platform_data); sh_mmcif_set_ios() 1017 host->card_present = true; sh_mmcif_set_ios() 1019 sh_mmcif_set_power(host, ios); sh_mmcif_set_ios() 1022 sh_mmcif_clock_control(host, 0); sh_mmcif_set_ios() 1024 if (host->card_present) { sh_mmcif_set_ios() 1025 sh_mmcif_release_dma(host); sh_mmcif_set_ios() 1026 host->card_present = false; sh_mmcif_set_ios() 1029 if (host->power) { sh_mmcif_set_ios() 1030 pm_runtime_put_sync(&host->pd->dev); sh_mmcif_set_ios() 1031 clk_disable_unprepare(host->hclk); sh_mmcif_set_ios() 1032 host->power = false; sh_mmcif_set_ios() 1034 sh_mmcif_set_power(host, ios); sh_mmcif_set_ios() 1036 host->state = STATE_IDLE; 
sh_mmcif_set_ios() 1041 if (!host->power) { sh_mmcif_set_ios() 1042 sh_mmcif_clk_update(host); sh_mmcif_set_ios() 1043 pm_runtime_get_sync(&host->pd->dev); sh_mmcif_set_ios() 1044 host->power = true; sh_mmcif_set_ios() 1045 sh_mmcif_sync_reset(host); sh_mmcif_set_ios() 1047 sh_mmcif_clock_control(host, ios->clock); sh_mmcif_set_ios() 1050 host->timing = ios->timing; sh_mmcif_set_ios() 1051 host->bus_width = ios->bus_width; sh_mmcif_set_ios() 1052 host->state = STATE_IDLE; sh_mmcif_set_ios() 1057 struct sh_mmcif_host *host = mmc_priv(mmc); sh_mmcif_get_cd() local 1058 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; sh_mmcif_get_cd() 1067 return p->get_cd(host->pd); sh_mmcif_get_cd() 1076 static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host) sh_mmcif_end_cmd() argument 1078 struct mmc_command *cmd = host->mrq->cmd; sh_mmcif_end_cmd() 1079 struct mmc_data *data = host->mrq->data; sh_mmcif_end_cmd() 1082 if (host->sd_error) { sh_mmcif_end_cmd() 1090 cmd->error = sh_mmcif_error_manage(host); sh_mmcif_end_cmd() 1093 dev_dbg(&host->pd->dev, "CMD%d error %d\n", sh_mmcif_end_cmd() 1095 host->sd_error = false; sh_mmcif_end_cmd() 1103 sh_mmcif_get_response(host, cmd); sh_mmcif_end_cmd() 1112 init_completion(&host->dma_complete); sh_mmcif_end_cmd() 1115 if (host->chan_rx) sh_mmcif_end_cmd() 1116 sh_mmcif_start_dma_rx(host); sh_mmcif_end_cmd() 1118 if (host->chan_tx) sh_mmcif_end_cmd() 1119 sh_mmcif_start_dma_tx(host); sh_mmcif_end_cmd() 1122 if (!host->dma_active) { sh_mmcif_end_cmd() 1123 data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode); sh_mmcif_end_cmd() 1128 time = wait_for_completion_interruptible_timeout(&host->dma_complete, sh_mmcif_end_cmd() 1129 host->timeout); sh_mmcif_end_cmd() 1132 dma_unmap_sg(host->chan_rx->device->dev, sh_mmcif_end_cmd() 1136 dma_unmap_sg(host->chan_tx->device->dev, sh_mmcif_end_cmd() 1140 if (host->sd_error) { sh_mmcif_end_cmd() 1141 dev_err(host->mmc->parent, sh_mmcif_end_cmd() 1144 data->error = sh_mmcif_error_manage(host); sh_mmcif_end_cmd() 1146 dev_err(host->mmc->parent, "DMA timeout!\n"); sh_mmcif_end_cmd() 1149 dev_err(host->mmc->parent, sh_mmcif_end_cmd() 1153 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, sh_mmcif_end_cmd() 1155 host->dma_active = false; sh_mmcif_end_cmd() 1161 dmaengine_terminate_all(host->chan_rx); sh_mmcif_end_cmd() 1163 dmaengine_terminate_all(host->chan_tx); sh_mmcif_end_cmd() 1171 struct sh_mmcif_host *host = dev_id; sh_mmcif_irqt() local 1177 spin_lock_irqsave(&host->lock, flags); sh_mmcif_irqt() 1178 wait_work = host->wait_for; sh_mmcif_irqt() 1179 spin_unlock_irqrestore(&host->lock, flags); sh_mmcif_irqt() 1181 cancel_delayed_work_sync(&host->timeout_work); sh_mmcif_irqt() 1183 mutex_lock(&host->thread_lock); sh_mmcif_irqt() 1185 mrq = host->mrq; sh_mmcif_irqt() 1187 dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n", sh_mmcif_irqt() 1188 host->state, host->wait_for); sh_mmcif_irqt() 1189 mutex_unlock(&host->thread_lock); sh_mmcif_irqt() 1200 mutex_unlock(&host->thread_lock); sh_mmcif_irqt() 1204 wait = sh_mmcif_end_cmd(host); sh_mmcif_irqt() 1208 wait = sh_mmcif_mread_block(host); sh_mmcif_irqt() 1212 wait = sh_mmcif_read_block(host); sh_mmcif_irqt() 1216 wait = sh_mmcif_mwrite_block(host); sh_mmcif_irqt() 1220 wait = sh_mmcif_write_block(host); sh_mmcif_irqt() 1223 if (host->sd_error) { sh_mmcif_irqt() 1224 mrq->stop->error = sh_mmcif_error_manage(host); sh_mmcif_irqt() 1225 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error); sh_mmcif_irqt() 1228 sh_mmcif_get_cmd12response(host, 
mrq->stop); sh_mmcif_irqt() 1233 if (host->sd_error) { sh_mmcif_irqt() 1234 mrq->data->error = sh_mmcif_error_manage(host); sh_mmcif_irqt() 1235 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error); sh_mmcif_irqt() 1243 schedule_delayed_work(&host->timeout_work, host->timeout); sh_mmcif_irqt() 1245 mutex_unlock(&host->thread_lock); sh_mmcif_irqt() 1249 if (host->wait_for != MMCIF_WAIT_FOR_STOP) { sh_mmcif_irqt() 1256 sh_mmcif_stop_cmd(host, mrq); sh_mmcif_irqt() 1258 schedule_delayed_work(&host->timeout_work, host->timeout); sh_mmcif_irqt() 1259 mutex_unlock(&host->thread_lock); sh_mmcif_irqt() 1265 host->wait_for = MMCIF_WAIT_FOR_REQUEST; sh_mmcif_irqt() 1266 host->state = STATE_IDLE; sh_mmcif_irqt() 1267 host->mrq = NULL; sh_mmcif_irqt() 1268 mmc_request_done(host->mmc, mrq); sh_mmcif_irqt() 1270 mutex_unlock(&host->thread_lock); sh_mmcif_irqt() 1277 struct sh_mmcif_host *host = dev_id; sh_mmcif_intr() local 1280 state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); sh_mmcif_intr() 1281 mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK); sh_mmcif_intr() 1282 if (host->ccs_enable) sh_mmcif_intr() 1283 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask)); sh_mmcif_intr() 1285 sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask)); sh_mmcif_intr() 1286 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN); sh_mmcif_intr() 1289 dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n", sh_mmcif_intr() 1293 host->sd_error = true; sh_mmcif_intr() 1294 dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state); sh_mmcif_intr() 1297 if (!host->mrq) sh_mmcif_intr() 1298 dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state); sh_mmcif_intr() 1299 if (!host->dma_active) sh_mmcif_intr() 1301 else if (host->sd_error) sh_mmcif_intr() 1302 mmcif_dma_complete(host); sh_mmcif_intr() 1304 dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state); sh_mmcif_intr() 1313 struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work); mmcif_timeout_work() local 1314 struct mmc_request *mrq = host->mrq; mmcif_timeout_work() 1317 if (host->dying) mmcif_timeout_work() 1321 spin_lock_irqsave(&host->lock, flags); mmcif_timeout_work() 1322 if (host->state == STATE_IDLE) { mmcif_timeout_work() 1323 spin_unlock_irqrestore(&host->lock, flags); mmcif_timeout_work() 1327 dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n", mmcif_timeout_work() 1328 host->wait_for, mrq->cmd->opcode); mmcif_timeout_work() 1330 host->state = STATE_TIMEOUT; mmcif_timeout_work() 1331 spin_unlock_irqrestore(&host->lock, flags); mmcif_timeout_work() 1337 switch (host->wait_for) { mmcif_timeout_work() 1339 mrq->cmd->error = sh_mmcif_error_manage(host); mmcif_timeout_work() 1342 mrq->stop->error = sh_mmcif_error_manage(host); mmcif_timeout_work() 1350 mrq->data->error = sh_mmcif_error_manage(host); mmcif_timeout_work() 1356 host->state = STATE_IDLE; mmcif_timeout_work() 1357 host->wait_for = MMCIF_WAIT_FOR_REQUEST; mmcif_timeout_work() 1358 host->mrq = NULL; mmcif_timeout_work() 1359 mmc_request_done(host->mmc, mrq); mmcif_timeout_work() 1362 static void sh_mmcif_init_ocr(struct sh_mmcif_host *host) sh_mmcif_init_ocr() argument 1364 struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data; sh_mmcif_init_ocr() 1365 struct mmc_host *mmc = host->mmc; sh_mmcif_init_ocr() 1382 struct sh_mmcif_host *host; sh_mmcif_probe() local 1408 host = mmc_priv(mmc); sh_mmcif_probe() 1409 host->mmc = mmc; sh_mmcif_probe() 1410 host->addr = reg; sh_mmcif_probe() 1411 host->timeout = 
msecs_to_jiffies(10000); sh_mmcif_probe() 1412 host->ccs_enable = !pd || !pd->ccs_unsupported; sh_mmcif_probe() 1413 host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present; sh_mmcif_probe() 1415 host->pd = pdev; sh_mmcif_probe() 1417 spin_lock_init(&host->lock); sh_mmcif_probe() 1420 sh_mmcif_init_ocr(host); sh_mmcif_probe() 1431 platform_set_drvdata(pdev, host); sh_mmcif_probe() 1434 host->power = false; sh_mmcif_probe() 1436 host->hclk = devm_clk_get(&pdev->dev, NULL); sh_mmcif_probe() 1437 if (IS_ERR(host->hclk)) { sh_mmcif_probe() 1438 ret = PTR_ERR(host->hclk); sh_mmcif_probe() 1442 ret = sh_mmcif_clk_update(host); sh_mmcif_probe() 1450 INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work); sh_mmcif_probe() 1452 sh_mmcif_sync_reset(host); sh_mmcif_probe() 1453 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); sh_mmcif_probe() 1457 sh_mmcif_irqt, 0, name, host); sh_mmcif_probe() 1465 0, "sh_mmc:int", host); sh_mmcif_probe() 1478 mutex_init(&host->thread_lock); sh_mmcif_probe() 1487 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff, sh_mmcif_probe() 1488 clk_get_rate(host->hclk) / 1000000UL); sh_mmcif_probe() 1490 clk_disable_unprepare(host->hclk); sh_mmcif_probe() 1494 clk_disable_unprepare(host->hclk); sh_mmcif_probe() 1504 struct sh_mmcif_host *host = platform_get_drvdata(pdev); sh_mmcif_remove() local 1506 host->dying = true; sh_mmcif_remove() 1507 clk_prepare_enable(host->hclk); sh_mmcif_remove() 1512 mmc_remove_host(host->mmc); sh_mmcif_remove() 1513 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); sh_mmcif_remove() 1520 cancel_delayed_work_sync(&host->timeout_work); sh_mmcif_remove() 1522 clk_disable_unprepare(host->hclk); sh_mmcif_remove() 1523 mmc_free_host(host->mmc); sh_mmcif_remove() 1533 struct sh_mmcif_host *host = dev_get_drvdata(dev); sh_mmcif_suspend() local 1535 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); sh_mmcif_suspend()
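The sh_mmcif multi-block PIO paths above drain one block per buffer-ready interrupt, 32 bits at a time, and sh_mmcif_next_block() decides whether the transfer pointer simply advances inside the current scatterlist segment or hops to the next one. The following self-contained C sketch models only that walk; the two-segment layout and all toy_* names are invented, and it assumes, as the driver's BUG_ON does, that every segment length is a whole multiple of the block size.

	#include <assert.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct toy_seg {
		uint32_t *buf;
		size_t length;          /* bytes */
	};

	struct toy_host {
		size_t blocksize;       /* bytes per block, multiple of 4        */
		size_t sg_idx;          /* current segment                       */
		size_t sg_blkidx;       /* bytes consumed in the current segment */
		uint32_t *pio_ptr;      /* where the next word lands             */
	};

	/* Mirrors the advance logic of sh_mmcif_next_block(): returns true
	 * while further segments remain to be filled. */
	static bool toy_next_block(struct toy_host *host, struct toy_seg *sg,
				   size_t sg_len, uint32_t *p)
	{
		host->sg_blkidx += host->blocksize;
		assert(host->sg_blkidx <= sg[host->sg_idx].length);

		if (host->sg_blkidx == sg[host->sg_idx].length) {
			host->sg_blkidx = 0;
			if (++host->sg_idx < sg_len)
				host->pio_ptr = sg[host->sg_idx].buf;
		} else {
			host->pio_ptr = p;
		}
		return host->sg_idx != sg_len;
	}

	int main(void)
	{
		uint32_t seg0[128], seg1[256];          /* 512 B + 1024 B segments    */
		struct toy_seg sg[2] = {
			{ seg0, sizeof(seg0) },
			{ seg1, sizeof(seg1) },
		};
		struct toy_host host = { 512, 0, 0, seg0 };
		uint32_t fake_data_reg = 0;             /* stand-in for MMCIF_CE_DATA */
		int blocks = 0;
		bool more = true;

		while (more) {
			uint32_t *p = host.pio_ptr;
			size_t i;

			/* one "buffer ready" interrupt: read one block, word by word */
			for (i = 0; i < host.blocksize / 4; i++)
				*p++ = fake_data_reg++;

			blocks++;
			more = toy_next_block(&host, sg, 2, p);
		}

		printf("transferred %d blocks across 2 segments\n", blocks);
		return 0;
	}

Running it moves three 512-byte blocks, one into the first segment and two into the second, which is the same bookkeeping the interrupt thread repeats while in the MMCIF_WAIT_FOR_MREAD and MMCIF_WAIT_FOR_MWRITE states.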
H A D | mmci.c | 2 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver 25 #include <linux/mmc/host.h> 225 struct mmci_host *host = mmc_priv(mmc); mmci_card_busy() local 231 spin_lock_irqsave(&host->lock, flags); mmci_card_busy() 232 if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY) mmci_card_busy() 234 spin_unlock_irqrestore(&host->lock, flags); mmci_card_busy() 245 static int mmci_validate_data(struct mmci_host *host, mmci_validate_data() argument 252 dev_err(mmc_dev(host->mmc), mmci_validate_data() 260 static void mmci_reg_delay(struct mmci_host *host) mmci_reg_delay() argument 269 if (host->cclk < 25000000) mmci_reg_delay() 276 * This must be called with host->lock held 278 static void mmci_write_clkreg(struct mmci_host *host, u32 clk) mmci_write_clkreg() argument 280 if (host->clk_reg != clk) { mmci_write_clkreg() 281 host->clk_reg = clk; mmci_write_clkreg() 282 writel(clk, host->base + MMCICLOCK); mmci_write_clkreg() 287 * This must be called with host->lock held 289 static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr) mmci_write_pwrreg() argument 291 if (host->pwr_reg != pwr) { mmci_write_pwrreg() 292 host->pwr_reg = pwr; mmci_write_pwrreg() 293 writel(pwr, host->base + MMCIPOWER); mmci_write_pwrreg() 298 * This must be called with host->lock held 300 static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl) mmci_write_datactrlreg() argument 303 datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE; mmci_write_datactrlreg() 305 if (host->datactrl_reg != datactrl) { mmci_write_datactrlreg() 306 host->datactrl_reg = datactrl; mmci_write_datactrlreg() 307 writel(datactrl, host->base + MMCIDATACTRL); mmci_write_datactrlreg() 312 * This must be called with host->lock held 314 static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) mmci_set_clkreg() argument 316 struct variant_data *variant = host->variant; mmci_set_clkreg() 320 host->cclk = 0; mmci_set_clkreg() 324 host->cclk = host->mclk; mmci_set_clkreg() 325 } else if (desired >= host->mclk) { mmci_set_clkreg() 329 host->cclk = host->mclk; mmci_set_clkreg() 337 clk = DIV_ROUND_UP(host->mclk, desired) - 2; mmci_set_clkreg() 340 host->cclk = host->mclk / (clk + 2); mmci_set_clkreg() 346 clk = host->mclk / (2 * desired) - 1; mmci_set_clkreg() 349 host->cclk = host->mclk / (2 * (clk + 1)); mmci_set_clkreg() 359 host->mmc->actual_clock = host->cclk; mmci_set_clkreg() 361 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) mmci_set_clkreg() 363 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) mmci_set_clkreg() 366 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || mmci_set_clkreg() 367 host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) mmci_set_clkreg() 370 mmci_write_clkreg(host, clk); mmci_set_clkreg() 374 mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) mmci_request_end() argument 376 writel(0, host->base + MMCICOMMAND); mmci_request_end() 378 BUG_ON(host->data); mmci_request_end() 380 host->mrq = NULL; mmci_request_end() 381 host->cmd = NULL; mmci_request_end() 383 mmc_request_done(host->mmc, mrq); mmci_request_end() 385 pm_runtime_mark_last_busy(mmc_dev(host->mmc)); mmci_request_end() 386 pm_runtime_put_autosuspend(mmc_dev(host->mmc)); mmci_request_end() 389 static void mmci_set_mask1(struct mmci_host *host, unsigned int mask) mmci_set_mask1() argument 391 void __iomem *base = host->base; mmci_set_mask1() 393 if (host->singleirq) { mmci_set_mask1() 405 static void mmci_stop_data(struct mmci_host *host) mmci_stop_data() argument 407 mmci_write_datactrlreg(host, 
0); mmci_stop_data() 408 mmci_set_mask1(host, 0); mmci_stop_data() 409 host->data = NULL; mmci_stop_data() 412 static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) mmci_init_sg() argument 421 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); mmci_init_sg() 430 static void mmci_dma_setup(struct mmci_host *host) mmci_dma_setup() argument 433 struct variant_data *variant = host->variant; mmci_dma_setup() 435 host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx"); mmci_dma_setup() 436 host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx"); mmci_dma_setup() 439 host->next_data.cookie = 1; mmci_dma_setup() 446 if (host->dma_rx_channel && !host->dma_tx_channel) mmci_dma_setup() 447 host->dma_tx_channel = host->dma_rx_channel; mmci_dma_setup() 449 if (host->dma_rx_channel) mmci_dma_setup() 450 rxname = dma_chan_name(host->dma_rx_channel); mmci_dma_setup() 454 if (host->dma_tx_channel) mmci_dma_setup() 455 txname = dma_chan_name(host->dma_tx_channel); mmci_dma_setup() 459 dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n", mmci_dma_setup() 466 if (host->dma_tx_channel) { mmci_dma_setup() 467 struct device *dev = host->dma_tx_channel->device->dev; mmci_dma_setup() 470 if (max_seg_size < host->mmc->max_seg_size) mmci_dma_setup() 471 host->mmc->max_seg_size = max_seg_size; mmci_dma_setup() 473 if (host->dma_rx_channel) { mmci_dma_setup() 474 struct device *dev = host->dma_rx_channel->device->dev; mmci_dma_setup() 477 if (max_seg_size < host->mmc->max_seg_size) mmci_dma_setup() 478 host->mmc->max_seg_size = max_seg_size; mmci_dma_setup() 481 if (variant->qcom_dml && host->dma_rx_channel && host->dma_tx_channel) mmci_dma_setup() 482 if (dml_hw_init(host, host->mmc->parent->of_node)) mmci_dma_setup() 490 static inline void mmci_dma_release(struct mmci_host *host) mmci_dma_release() argument 492 if (host->dma_rx_channel) mmci_dma_release() 493 dma_release_channel(host->dma_rx_channel); mmci_dma_release() 494 if (host->dma_tx_channel) mmci_dma_release() 495 dma_release_channel(host->dma_tx_channel); mmci_dma_release() 496 host->dma_rx_channel = host->dma_tx_channel = NULL; mmci_dma_release() 499 static void mmci_dma_data_error(struct mmci_host *host) mmci_dma_data_error() argument 501 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); mmci_dma_data_error() 502 dmaengine_terminate_all(host->dma_current); mmci_dma_data_error() 503 host->dma_current = NULL; mmci_dma_data_error() 504 host->dma_desc_current = NULL; mmci_dma_data_error() 505 host->data->host_cookie = 0; mmci_dma_data_error() 508 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) mmci_dma_unmap() argument 515 chan = host->dma_rx_channel; mmci_dma_unmap() 518 chan = host->dma_tx_channel; mmci_dma_unmap() 524 static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data) mmci_dma_finalize() argument 531 status = readl(host->base + MMCISTATUS); mmci_dma_finalize() 544 mmci_dma_data_error(host); mmci_dma_finalize() 550 mmci_dma_unmap(host, data); mmci_dma_finalize() 557 dev_err(mmc_dev(host->mmc), "buggy DMA detected. 
Taking evasive action.\n"); mmci_dma_finalize() 558 mmci_dma_release(host); mmci_dma_finalize() 561 host->dma_current = NULL; mmci_dma_finalize() 562 host->dma_desc_current = NULL; mmci_dma_finalize() 566 static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, __mmci_dma_prep_data() argument 570 struct variant_data *variant = host->variant; __mmci_dma_prep_data() 572 .src_addr = host->phybase + MMCIFIFO, __mmci_dma_prep_data() 573 .dst_addr = host->phybase + MMCIFIFO, __mmci_dma_prep_data() 590 chan = host->dma_rx_channel; __mmci_dma_prep_data() 594 chan = host->dma_tx_channel; __mmci_dma_prep_data() 610 if (host->variant->qcom_dml) __mmci_dma_prep_data() 629 static inline int mmci_dma_prep_data(struct mmci_host *host, mmci_dma_prep_data() argument 633 if (host->dma_current && host->dma_desc_current) mmci_dma_prep_data() 637 return __mmci_dma_prep_data(host, data, &host->dma_current, mmci_dma_prep_data() 638 &host->dma_desc_current); mmci_dma_prep_data() 641 static inline int mmci_dma_prep_next(struct mmci_host *host, mmci_dma_prep_next() argument 644 struct mmci_host_next *nd = &host->next_data; mmci_dma_prep_next() 645 return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc); mmci_dma_prep_next() 648 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) mmci_dma_start_data() argument 651 struct mmc_data *data = host->data; mmci_dma_start_data() 653 ret = mmci_dma_prep_data(host, host->data); mmci_dma_start_data() 658 dev_vdbg(mmc_dev(host->mmc), mmci_dma_start_data() 661 dmaengine_submit(host->dma_desc_current); mmci_dma_start_data() 662 dma_async_issue_pending(host->dma_current); mmci_dma_start_data() 664 if (host->variant->qcom_dml) mmci_dma_start_data() 665 dml_start_xfer(host, data); mmci_dma_start_data() 670 mmci_write_datactrlreg(host, datactrl); mmci_dma_start_data() 677 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, mmci_dma_start_data() 678 host->base + MMCIMASK0); mmci_dma_start_data() 682 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) mmci_get_next_data() argument 684 struct mmci_host_next *next = &host->next_data; mmci_get_next_data() 689 host->dma_desc_current = next->dma_desc; mmci_get_next_data() 690 host->dma_current = next->dma_chan; mmci_get_next_data() 698 struct mmci_host *host = mmc_priv(mmc); mmci_pre_request() local 700 struct mmci_host_next *nd = &host->next_data; mmci_pre_request() 707 if (mmci_validate_data(host, data)) mmci_pre_request() 710 if (!mmci_dma_prep_next(host, data)) mmci_pre_request() 717 struct mmci_host *host = mmc_priv(mmc); mmci_post_request() local 723 mmci_dma_unmap(host, data); mmci_post_request() 726 struct mmci_host_next *next = &host->next_data; mmci_post_request() 729 chan = host->dma_rx_channel; mmci_post_request() 731 chan = host->dma_tx_channel; mmci_post_request() 734 if (host->dma_desc_current == next->dma_desc) mmci_post_request() 735 host->dma_desc_current = NULL; mmci_post_request() 737 if (host->dma_current == next->dma_chan) mmci_post_request() 738 host->dma_current = NULL; mmci_post_request() 748 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) mmci_get_next_data() argument 751 static inline void mmci_dma_setup(struct mmci_host *host) mmci_dma_setup() argument 755 static inline void mmci_dma_release(struct mmci_host *host) mmci_dma_release() argument 759 static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) mmci_dma_unmap() argument 763 static inline void 
mmci_dma_finalize(struct mmci_host *host, mmci_dma_finalize() argument 768 static inline void mmci_dma_data_error(struct mmci_host *host) mmci_dma_data_error() argument 772 static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) mmci_dma_start_data() argument 782 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) mmci_start_data() argument 784 struct variant_data *variant = host->variant; mmci_start_data() 790 dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n", mmci_start_data() 793 host->data = data; mmci_start_data() 794 host->size = data->blksz * data->blocks; mmci_start_data() 797 clks = (unsigned long long)data->timeout_ns * host->cclk; mmci_start_data() 802 base = host->base; mmci_start_data() 804 writel(host->size, base + MMCIDATALENGTH); mmci_start_data() 819 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) { mmci_start_data() 831 (host->size < 8 || mmci_start_data() 832 (host->size <= 8 && host->mclk > 50000000))) mmci_start_data() 833 clk = host->clk_reg & ~variant->clkreg_enable; mmci_start_data() 835 clk = host->clk_reg | variant->clkreg_enable; mmci_start_data() 837 mmci_write_clkreg(host, clk); mmci_start_data() 840 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || mmci_start_data() 841 host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) mmci_start_data() 848 if (!mmci_dma_start_data(host, datactrl)) mmci_start_data() 852 mmci_init_sg(host, data); mmci_start_data() 862 if (host->size < variant->fifohalfsize) mmci_start_data() 872 mmci_write_datactrlreg(host, datactrl); mmci_start_data() 874 mmci_set_mask1(host, irqmask); mmci_start_data() 878 mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) mmci_start_command() argument 880 void __iomem *base = host->base; mmci_start_command() 882 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n", mmci_start_command() 887 mmci_reg_delay(host); mmci_start_command() 900 c |= host->variant->data_cmd_enable; mmci_start_command() 902 host->cmd = cmd; mmci_start_command() 909 mmci_data_irq(struct mmci_host *host, struct mmc_data *data, mmci_data_irq() argument 922 if (dma_inprogress(host)) { mmci_data_irq() 923 mmci_dma_data_error(host); mmci_data_irq() 924 mmci_dma_unmap(host, data); mmci_data_irq() 930 * on the MMC bus, not on the host side. 
On reads, this mmci_data_irq() 934 remain = readl(host->base + MMCIDATACNT); mmci_data_irq() 937 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", mmci_data_irq() 950 if (success > host->variant->fifosize) mmci_data_irq() 951 success -= host->variant->fifosize; mmci_data_irq() 960 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n"); mmci_data_irq() 963 if (dma_inprogress(host)) mmci_data_irq() 964 mmci_dma_finalize(host, data); mmci_data_irq() 965 mmci_stop_data(host); mmci_data_irq() 971 if (!data->stop || host->mrq->sbc) { mmci_data_irq() 972 mmci_request_end(host, data->mrq); mmci_data_irq() 974 mmci_start_command(host, data->stop, 0); mmci_data_irq() 980 mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, mmci_cmd_irq() argument 983 void __iomem *base = host->base; mmci_cmd_irq() 989 sbc = (cmd == host->mrq->sbc); mmci_cmd_irq() 990 busy_resp = host->variant->busy_detect && (cmd->flags & MMC_RSP_BUSY); mmci_cmd_irq() 992 if (!((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT| mmci_cmd_irq() 997 if (host->busy_status && (status & MCI_ST_CARDBUSY)) mmci_cmd_irq() 1001 if (!host->busy_status && busy_resp && mmci_cmd_irq() 1006 host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND); mmci_cmd_irq() 1011 if (host->busy_status) { mmci_cmd_irq() 1014 host->busy_status = 0; mmci_cmd_irq() 1017 host->cmd = NULL; mmci_cmd_irq() 1031 if (host->data) { mmci_cmd_irq() 1033 if (dma_inprogress(host)) { mmci_cmd_irq() 1034 mmci_dma_data_error(host); mmci_cmd_irq() 1035 mmci_dma_unmap(host, host->data); mmci_cmd_irq() 1037 mmci_stop_data(host); mmci_cmd_irq() 1039 mmci_request_end(host, host->mrq); mmci_cmd_irq() 1041 mmci_start_command(host, host->mrq->cmd, 0); mmci_cmd_irq() 1043 mmci_start_data(host, cmd->data); mmci_cmd_irq() 1047 static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain) mmci_get_rx_fifocnt() argument 1049 return remain - (readl(host->base + MMCIFIFOCNT) << 2); mmci_get_rx_fifocnt() 1052 static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r) mmci_qcom_get_rx_fifocnt() argument 1059 return host->variant->fifohalfsize; mmci_qcom_get_rx_fifocnt() 1066 static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain) mmci_pio_read() argument 1068 void __iomem *base = host->base; mmci_pio_read() 1070 u32 status = readl(host->base + MMCISTATUS); mmci_pio_read() 1071 int host_remain = host->size; mmci_pio_read() 1074 int count = host->get_rx_fifocnt(host, status, host_remain); mmci_pio_read() 1114 static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status) mmci_pio_write() argument 1116 struct variant_data *variant = host->variant; mmci_pio_write() 1117 void __iomem *base = host->base; mmci_pio_write() 1154 struct mmci_host *host = dev_id; mmci_pio_irq() local 1155 struct sg_mapping_iter *sg_miter = &host->sg_miter; mmci_pio_irq() 1156 struct variant_data *variant = host->variant; mmci_pio_irq() 1157 void __iomem *base = host->base; mmci_pio_irq() 1163 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); mmci_pio_irq() 1189 len = mmci_pio_read(host, buffer, remain); mmci_pio_irq() 1191 len = mmci_pio_write(host, buffer, remain, status); mmci_pio_irq() 1195 host->size -= len; mmci_pio_irq() 1212 if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize) mmci_pio_irq() 1213 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK); mmci_pio_irq() 1221 if (host->size == 0) { mmci_pio_irq() 1222 mmci_set_mask1(host, 0); mmci_pio_irq() 1234 
struct mmci_host *host = dev_id; mmci_irq() local 1238 spin_lock(&host->lock); mmci_irq() 1241 status = readl(host->base + MMCISTATUS); mmci_irq() 1243 if (host->singleirq) { mmci_irq() 1244 if (status & readl(host->base + MMCIMASK1)) mmci_irq() 1255 status &= readl(host->base + MMCIMASK0); mmci_irq() 1256 writel(status, host->base + MMCICLEAR); mmci_irq() 1258 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); mmci_irq() 1260 if (host->variant->reversed_irq_handling) { mmci_irq() 1261 mmci_data_irq(host, host->data, status); mmci_irq() 1262 mmci_cmd_irq(host, host->cmd, status); mmci_irq() 1264 mmci_cmd_irq(host, host->cmd, status); mmci_irq() 1265 mmci_data_irq(host, host->data, status); mmci_irq() 1269 if (host->busy_status) mmci_irq() 1275 spin_unlock(&host->lock); mmci_irq() 1282 struct mmci_host *host = mmc_priv(mmc); mmci_request() local 1285 WARN_ON(host->mrq != NULL); mmci_request() 1287 mrq->cmd->error = mmci_validate_data(host, mrq->data); mmci_request() 1295 spin_lock_irqsave(&host->lock, flags); mmci_request() 1297 host->mrq = mrq; mmci_request() 1300 mmci_get_next_data(host, mrq->data); mmci_request() 1303 mmci_start_data(host, mrq->data); mmci_request() 1306 mmci_start_command(host, mrq->sbc, 0); mmci_request() 1308 mmci_start_command(host, mrq->cmd, 0); mmci_request() 1310 spin_unlock_irqrestore(&host->lock, flags); mmci_request() 1315 struct mmci_host *host = mmc_priv(mmc); mmci_set_ios() local 1316 struct variant_data *variant = host->variant; mmci_set_ios() 1323 if (host->plat->ios_handler && mmci_set_ios() 1324 host->plat->ios_handler(mmc_dev(mmc), ios)) mmci_set_ios() 1332 if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) { mmci_set_ios() 1334 host->vqmmc_enabled = false; mmci_set_ios() 1351 if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) { mmci_set_ios() 1357 host->vqmmc_enabled = true; mmci_set_ios() 1370 pwr |= host->pwr_reg_add; mmci_set_ios() 1381 if (host->hw_designer != AMBA_VENDOR_ST) mmci_set_ios() 1399 if (host->variant->explicit_mclk_control && mmci_set_ios() 1400 ios->clock != host->clock_cache) { mmci_set_ios() 1401 ret = clk_set_rate(host->clk, ios->clock); mmci_set_ios() 1403 dev_err(mmc_dev(host->mmc), mmci_set_ios() 1406 host->mclk = clk_get_rate(host->clk); mmci_set_ios() 1408 host->clock_cache = ios->clock; mmci_set_ios() 1410 spin_lock_irqsave(&host->lock, flags); mmci_set_ios() 1412 mmci_set_clkreg(host, ios->clock); mmci_set_ios() 1413 mmci_write_pwrreg(host, pwr); mmci_set_ios() 1414 mmci_reg_delay(host); mmci_set_ios() 1416 spin_unlock_irqrestore(&host->lock, flags); mmci_set_ios() 1424 struct mmci_host *host = mmc_priv(mmc); mmci_get_cd() local 1425 struct mmci_platform_data *plat = host->plat; mmci_get_cd() 1432 status = plat->status(mmc_dev(host->mmc)); mmci_get_cd() 1482 struct mmci_host *host = mmc_priv(mmc); mmci_of_parse() local 1489 host->pwr_reg_add |= MCI_ST_DATA0DIREN; mmci_of_parse() 1491 host->pwr_reg_add |= MCI_ST_DATA2DIREN; mmci_of_parse() 1493 host->pwr_reg_add |= MCI_ST_DATA31DIREN; mmci_of_parse() 1495 host->pwr_reg_add |= MCI_ST_DATA74DIREN; mmci_of_parse() 1497 host->pwr_reg_add |= MCI_ST_CMDDIREN; mmci_of_parse() 1499 host->pwr_reg_add |= MCI_ST_FBCLKEN; mmci_of_parse() 1515 struct mmci_host *host; mmci_probe() local 1539 host = mmc_priv(mmc); mmci_probe() 1540 host->mmc = mmc; mmci_probe() 1542 host->hw_designer = amba_manf(dev); mmci_probe() 1543 host->hw_revision = amba_rev(dev); mmci_probe() 1544 dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer); mmci_probe() 1545 
dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision); mmci_probe() 1547 host->clk = devm_clk_get(&dev->dev, NULL); mmci_probe() 1548 if (IS_ERR(host->clk)) { mmci_probe() 1549 ret = PTR_ERR(host->clk); mmci_probe() 1553 ret = clk_prepare_enable(host->clk); mmci_probe() 1558 host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt; mmci_probe() 1560 host->get_rx_fifocnt = mmci_get_rx_fifocnt; mmci_probe() 1562 host->plat = plat; mmci_probe() 1563 host->variant = variant; mmci_probe() 1564 host->mclk = clk_get_rate(host->clk); mmci_probe() 1570 if (host->mclk > variant->f_max) { mmci_probe() 1571 ret = clk_set_rate(host->clk, variant->f_max); mmci_probe() 1574 host->mclk = clk_get_rate(host->clk); mmci_probe() 1576 host->mclk); mmci_probe() 1579 host->phybase = dev->res.start; mmci_probe() 1580 host->base = devm_ioremap_resource(&dev->dev, &dev->res); mmci_probe() 1581 if (IS_ERR(host->base)) { mmci_probe() 1582 ret = PTR_ERR(host->base); mmci_probe() 1593 mmc->f_min = DIV_ROUND_UP(host->mclk, 257); mmci_probe() 1595 mmc->f_min = clk_round_rate(host->clk, 100000); mmci_probe() 1597 mmc->f_min = DIV_ROUND_UP(host->mclk, 512); mmci_probe() 1607 min(host->mclk, mmc->f_max); mmci_probe() 1610 fmax : min(host->mclk, fmax); mmci_probe() 1637 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE); mmci_probe() 1676 spin_lock_init(&host->lock); mmci_probe() 1678 writel(0, host->base + MMCIMASK0); mmci_probe() 1679 writel(0, host->base + MMCIMASK1); mmci_probe() 1680 writel(0xfff, host->base + MMCICLEAR); mmci_probe() 1714 DRIVER_NAME " (cmd)", host); mmci_probe() 1719 host->singleirq = true; mmci_probe() 1722 IRQF_SHARED, DRIVER_NAME " (pio)", host); mmci_probe() 1727 writel(MCI_IRQENABLE, host->base + MMCIMASK0); mmci_probe() 1736 mmci_dma_setup(host); mmci_probe() 1747 clk_disable_unprepare(host->clk); mmci_probe() 1758 struct mmci_host *host = mmc_priv(mmc); mmci_remove() local 1768 writel(0, host->base + MMCIMASK0); mmci_remove() 1769 writel(0, host->base + MMCIMASK1); mmci_remove() 1771 writel(0, host->base + MMCICOMMAND); mmci_remove() 1772 writel(0, host->base + MMCIDATACTRL); mmci_remove() 1774 mmci_dma_release(host); mmci_remove() 1775 clk_disable_unprepare(host->clk); mmci_remove() 1783 static void mmci_save(struct mmci_host *host) mmci_save() argument 1787 spin_lock_irqsave(&host->lock, flags); mmci_save() 1789 writel(0, host->base + MMCIMASK0); mmci_save() 1790 if (host->variant->pwrreg_nopower) { mmci_save() 1791 writel(0, host->base + MMCIDATACTRL); mmci_save() 1792 writel(0, host->base + MMCIPOWER); mmci_save() 1793 writel(0, host->base + MMCICLOCK); mmci_save() 1795 mmci_reg_delay(host); mmci_save() 1797 spin_unlock_irqrestore(&host->lock, flags); mmci_save() 1800 static void mmci_restore(struct mmci_host *host) mmci_restore() argument 1804 spin_lock_irqsave(&host->lock, flags); mmci_restore() 1806 if (host->variant->pwrreg_nopower) { mmci_restore() 1807 writel(host->clk_reg, host->base + MMCICLOCK); mmci_restore() 1808 writel(host->datactrl_reg, host->base + MMCIDATACTRL); mmci_restore() 1809 writel(host->pwr_reg, host->base + MMCIPOWER); mmci_restore() 1811 writel(MCI_IRQENABLE, host->base + MMCIMASK0); mmci_restore() 1812 mmci_reg_delay(host); mmci_restore() 1814 spin_unlock_irqrestore(&host->lock, flags); mmci_restore() 1823 struct mmci_host *host = mmc_priv(mmc); mmci_runtime_suspend() local 1825 mmci_save(host); mmci_runtime_suspend() 1826 clk_disable_unprepare(host->clk); mmci_runtime_suspend() 1838 struct mmci_host *host = mmc_priv(mmc); mmci_runtime_resume() local 1839 
clk_prepare_enable(host->clk); mmci_runtime_resume() 1840 mmci_restore(host); mmci_runtime_resume()
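mmci_set_clkreg() above chooses between two divider formulas: variants flagged with st_clkdiv derive the card clock as mclk / (div + 2), with the divider rounded up so the result never exceeds the requested rate, while the original PL180 scheme uses mclk / (2 * (div + 1)). A standalone sketch of just that arithmetic follows, using a typical 400 kHz initialization clock as the worked example; the 8-bit clamp on the divider field is an assumption, since those lines are elided from the excerpt.

	/* build: cc -o mmci_clk mmci_clk.c && ./mmci_clk */
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* st_clkdiv branch: cclk = mclk / (div + 2) */
	static unsigned int st_cclk(unsigned int mclk, unsigned int desired,
				    unsigned int *div)
	{
		unsigned int clk = DIV_ROUND_UP(mclk, desired) - 2;

		if (clk >= 256)		/* assumed 8-bit divider field */
			clk = 255;
		*div = clk;
		return mclk / (clk + 2);
	}

	/* classic PL180 branch: cclk = mclk / (2 * (div + 1)) */
	static unsigned int pl180_cclk(unsigned int mclk, unsigned int desired,
				       unsigned int *div)
	{
		unsigned int clk = mclk / (2 * desired) - 1;

		if (clk >= 256)
			clk = 255;
		*div = clk;
		return mclk / (2 * (clk + 1));
	}

	int main(void)
	{
		unsigned int mclk = 100000000;	/* 100 MHz block clock */
		unsigned int desired = 400000;	/* 400 kHz init clock  */
		unsigned int div, cclk;

		cclk = st_cclk(mclk, desired, &div);
		printf("st_clkdiv: div=%u -> cclk=%u Hz\n", div, cclk);	/* 248 -> 400000 */

		cclk = pl180_cclk(mclk, desired, &div);
		printf("pl180:     div=%u -> cclk=%u Hz\n", div, cclk);	/* 124 -> 400000 */
		return 0;
	}

The clock-off case (desired == 0) and the bypass case (desired >= mclk) are handled before either branch in the driver, so neither helper here needs to guard against a zero request.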
H A D | dw_mmc.c | 31 #include <linux/mmc/host.h> 107 static bool dw_mci_reset(struct dw_mci *host); 108 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset); 121 spin_lock_bh(&slot->host->lock); dw_mci_req_show() 147 spin_unlock_bh(&slot->host->lock); dw_mci_req_show() 193 struct dw_mci *host = slot->host; dw_mci_init_debugfs() local 201 node = debugfs_create_file("regs", S_IRUSR, root, host, dw_mci_init_debugfs() 211 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state); dw_mci_init_debugfs() 216 (u32 *)&host->pending_events); dw_mci_init_debugfs() 221 (u32 *)&host->completed_events); dw_mci_init_debugfs() 238 struct dw_mci *host = slot->host; dw_mci_prepare_command() local 239 const struct dw_mci_drv_data *drv_data = slot->host->drv_data; dw_mci_prepare_command() 261 WARN_ON(slot->host->state != STATE_SENDING_CMD); dw_mci_prepare_command() 262 slot->host->state = STATE_SENDING_CMD11; dw_mci_prepare_command() 275 clk_en_a = mci_readl(host, CLKENA); dw_mci_prepare_command() 277 mci_writel(host, CLKENA, clk_en_a); dw_mci_prepare_command() 302 drv_data->prepare_command(slot->host, &cmdr); dw_mci_prepare_command() 307 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd) dw_mci_prep_stop_abort() argument 315 stop = &host->stop_abort; dw_mci_prep_stop_abort() 343 static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags) dw_mci_wait_while_busy() argument 357 while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) { dw_mci_wait_while_busy() 360 dev_err(host->dev, "Busy; trying anyway\n"); dw_mci_wait_while_busy() 368 static void dw_mci_start_command(struct dw_mci *host, dw_mci_start_command() argument 371 host->cmd = cmd; dw_mci_start_command() 372 dev_vdbg(host->dev, dw_mci_start_command() 376 mci_writel(host, CMDARG, cmd->arg); dw_mci_start_command() 378 dw_mci_wait_while_busy(host, cmd_flags); dw_mci_start_command() 380 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); dw_mci_start_command() 383 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) send_stop_abort() argument 385 struct mmc_command *stop = data->stop ? 
data->stop : &host->stop_abort; send_stop_abort() 386 dw_mci_start_command(host, stop, host->stop_cmdr); send_stop_abort() 390 static void dw_mci_stop_dma(struct dw_mci *host) dw_mci_stop_dma() argument 392 if (host->using_dma) { dw_mci_stop_dma() 393 host->dma_ops->stop(host); dw_mci_stop_dma() 394 host->dma_ops->cleanup(host); dw_mci_stop_dma() 398 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); dw_mci_stop_dma() 410 static void dw_mci_dma_cleanup(struct dw_mci *host) dw_mci_dma_cleanup() argument 412 struct mmc_data *data = host->data; dw_mci_dma_cleanup() 416 dma_unmap_sg(host->dev, dw_mci_dma_cleanup() 422 static void dw_mci_idmac_reset(struct dw_mci *host) dw_mci_idmac_reset() argument 424 u32 bmod = mci_readl(host, BMOD); dw_mci_idmac_reset() 427 mci_writel(host, BMOD, bmod); dw_mci_idmac_reset() 430 static void dw_mci_idmac_stop_dma(struct dw_mci *host) dw_mci_idmac_stop_dma() argument 435 temp = mci_readl(host, CTRL); dw_mci_idmac_stop_dma() 438 mci_writel(host, CTRL, temp); dw_mci_idmac_stop_dma() 441 temp = mci_readl(host, BMOD); dw_mci_idmac_stop_dma() 444 mci_writel(host, BMOD, temp); dw_mci_idmac_stop_dma() 447 static void dw_mci_idmac_complete_dma(struct dw_mci *host) dw_mci_idmac_complete_dma() argument 449 struct mmc_data *data = host->data; dw_mci_idmac_complete_dma() 451 dev_vdbg(host->dev, "DMA complete\n"); dw_mci_idmac_complete_dma() 453 host->dma_ops->cleanup(host); dw_mci_idmac_complete_dma() 460 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); dw_mci_idmac_complete_dma() 461 tasklet_schedule(&host->tasklet); dw_mci_idmac_complete_dma() 465 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data, dw_mci_translate_sglist() argument 470 if (host->dma_64bit_address == 1) { dw_mci_translate_sglist() 473 desc_first = desc_last = desc = host->sg_cpu; dw_mci_translate_sglist() 517 desc_first = desc_last = desc = host->sg_cpu; dw_mci_translate_sglist() 563 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) dw_mci_idmac_start_dma() argument 567 dw_mci_translate_sglist(host, host->data, sg_len); dw_mci_idmac_start_dma() 570 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET); dw_mci_idmac_start_dma() 571 dw_mci_idmac_reset(host); dw_mci_idmac_start_dma() 574 temp = mci_readl(host, CTRL); dw_mci_idmac_start_dma() 576 mci_writel(host, CTRL, temp); dw_mci_idmac_start_dma() 581 temp = mci_readl(host, BMOD); dw_mci_idmac_start_dma() 583 mci_writel(host, BMOD, temp); dw_mci_idmac_start_dma() 586 mci_writel(host, PLDMND, 1); dw_mci_idmac_start_dma() 589 static int dw_mci_idmac_init(struct dw_mci *host) dw_mci_idmac_init() argument 593 if (host->dma_64bit_address == 1) { dw_mci_idmac_init() 596 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr); dw_mci_idmac_init() 599 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; dw_mci_idmac_init() 601 p->des6 = (host->sg_dma + dw_mci_idmac_init() 605 p->des7 = (u64)(host->sg_dma + dw_mci_idmac_init() 615 p->des6 = host->sg_dma & 0xffffffff; dw_mci_idmac_init() 616 p->des7 = (u64)host->sg_dma >> 32; dw_mci_idmac_init() 622 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); dw_mci_idmac_init() 625 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) { dw_mci_idmac_init() 626 p->des3 = cpu_to_le32(host->sg_dma + dw_mci_idmac_init() 632 p->des3 = cpu_to_le32(host->sg_dma); dw_mci_idmac_init() 636 dw_mci_idmac_reset(host); dw_mci_idmac_init() 638 if (host->dma_64bit_address == 1) { dw_mci_idmac_init() 640 mci_writel(host, IDSTS64, IDMAC_INT_CLR); 
dw_mci_idmac_init() 641 mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI | dw_mci_idmac_init() 645 mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff); dw_mci_idmac_init() 646 mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32); dw_mci_idmac_init() 650 mci_writel(host, IDSTS, IDMAC_INT_CLR); dw_mci_idmac_init() 651 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | dw_mci_idmac_init() 655 mci_writel(host, DBADDR, host->sg_dma); dw_mci_idmac_init() 670 static int dw_mci_pre_dma_transfer(struct dw_mci *host, dw_mci_pre_dma_transfer() argument 696 sg_len = dma_map_sg(host->dev, dw_mci_pre_dma_transfer() 716 if (!slot->host->use_dma || !data) dw_mci_pre_req() 724 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0) dw_mci_pre_req() 735 if (!slot->host->use_dma || !data) dw_mci_post_req() 739 dma_unmap_sg(slot->host->dev, dw_mci_post_req() 746 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) dw_mci_adjust_fifoth() argument 751 u32 fifo_width = 1 << host->data_shift; dw_mci_adjust_fifoth() 756 tx_wmark = (host->fifo_depth) / 2; dw_mci_adjust_fifoth() 757 tx_wmark_invers = host->fifo_depth - tx_wmark; dw_mci_adjust_fifoth() 783 mci_writel(host, FIFOTH, fifoth_val); dw_mci_adjust_fifoth() 787 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data) dw_mci_ctrl_rd_thld() argument 799 if (host->verid < DW_MMC_240A) dw_mci_ctrl_rd_thld() 802 if (host->timing != MMC_TIMING_MMC_HS200 && dw_mci_ctrl_rd_thld() 803 host->timing != MMC_TIMING_MMC_HS400 && dw_mci_ctrl_rd_thld() 804 host->timing != MMC_TIMING_UHS_SDR104) dw_mci_ctrl_rd_thld() 807 blksz_depth = blksz / (1 << host->data_shift); dw_mci_ctrl_rd_thld() 808 fifo_depth = host->fifo_depth; dw_mci_ctrl_rd_thld() 819 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1)); dw_mci_ctrl_rd_thld() 823 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0)); dw_mci_ctrl_rd_thld() 826 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) dw_mci_submit_data_dma() argument 832 host->using_dma = 0; dw_mci_submit_data_dma() 835 if (!host->use_dma) dw_mci_submit_data_dma() 838 sg_len = dw_mci_pre_dma_transfer(host, data, 0); dw_mci_submit_data_dma() 840 host->dma_ops->stop(host); dw_mci_submit_data_dma() 844 host->using_dma = 1; dw_mci_submit_data_dma() 846 dev_vdbg(host->dev, dw_mci_submit_data_dma() 848 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, dw_mci_submit_data_dma() 856 if (host->prev_blksz != data->blksz) dw_mci_submit_data_dma() 857 dw_mci_adjust_fifoth(host, data); dw_mci_submit_data_dma() 860 temp = mci_readl(host, CTRL); dw_mci_submit_data_dma() 862 mci_writel(host, CTRL, temp); dw_mci_submit_data_dma() 865 spin_lock_irqsave(&host->irq_lock, irqflags); dw_mci_submit_data_dma() 866 temp = mci_readl(host, INTMASK); dw_mci_submit_data_dma() 868 mci_writel(host, INTMASK, temp); dw_mci_submit_data_dma() 869 spin_unlock_irqrestore(&host->irq_lock, irqflags); dw_mci_submit_data_dma() 871 host->dma_ops->start(host, sg_len); dw_mci_submit_data_dma() 876 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) dw_mci_submit_data() argument 883 WARN_ON(host->data); dw_mci_submit_data() 884 host->sg = NULL; dw_mci_submit_data() 885 host->data = data; dw_mci_submit_data() 888 host->dir_status = DW_MCI_RECV_STATUS; dw_mci_submit_data() 889 dw_mci_ctrl_rd_thld(host, data); dw_mci_submit_data() 891 host->dir_status = DW_MCI_SEND_STATUS; dw_mci_submit_data() 894 if (dw_mci_submit_data_dma(host, data)) { dw_mci_submit_data() 896 if (host->data->flags & 
MMC_DATA_READ) dw_mci_submit_data() 901 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); dw_mci_submit_data() 902 host->sg = data->sg; dw_mci_submit_data() 903 host->part_buf_start = 0; dw_mci_submit_data() 904 host->part_buf_count = 0; dw_mci_submit_data() 906 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR); dw_mci_submit_data() 908 spin_lock_irqsave(&host->irq_lock, irqflags); dw_mci_submit_data() 909 temp = mci_readl(host, INTMASK); dw_mci_submit_data() 911 mci_writel(host, INTMASK, temp); dw_mci_submit_data() 912 spin_unlock_irqrestore(&host->irq_lock, irqflags); dw_mci_submit_data() 914 temp = mci_readl(host, CTRL); dw_mci_submit_data() 916 mci_writel(host, CTRL, temp); dw_mci_submit_data() 923 mci_writel(host, FIFOTH, host->fifoth_val); dw_mci_submit_data() 924 host->prev_blksz = 0; dw_mci_submit_data() 931 host->prev_blksz = data->blksz; dw_mci_submit_data() 937 struct dw_mci *host = slot->host; mci_send_cmd() local 941 mci_writel(host, CMDARG, arg); mci_send_cmd() 943 dw_mci_wait_while_busy(host, cmd); mci_send_cmd() 944 mci_writel(host, CMD, SDMMC_CMD_START | cmd); mci_send_cmd() 947 cmd_status = mci_readl(host, CMD); mci_send_cmd() 958 struct dw_mci *host = slot->host; dw_mci_setup_bus() local 965 if (host->state == STATE_WAITING_CMD11_DONE) dw_mci_setup_bus() 969 mci_writel(host, CLKENA, 0); dw_mci_setup_bus() 971 } else if (clock != host->current_speed || force_clkinit) { dw_mci_setup_bus() 972 div = host->bus_hz / clock; dw_mci_setup_bus() 973 if (host->bus_hz % clock && host->bus_hz > clock) dw_mci_setup_bus() 980 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0; dw_mci_setup_bus() 985 slot->id, host->bus_hz, clock, dw_mci_setup_bus() 986 div ? ((host->bus_hz / div) >> 1) : dw_mci_setup_bus() 987 host->bus_hz, div); dw_mci_setup_bus() 990 mci_writel(host, CLKENA, 0); dw_mci_setup_bus() 991 mci_writel(host, CLKSRC, 0); dw_mci_setup_bus() 997 mci_writel(host, CLKDIV, div); dw_mci_setup_bus() 1006 mci_writel(host, CLKENA, clk_en_a); dw_mci_setup_bus() 1015 host->current_speed = clock; dw_mci_setup_bus() 1018 mci_writel(host, CTYPE, (slot->ctype << slot->id)); dw_mci_setup_bus() 1021 static void __dw_mci_start_request(struct dw_mci *host, __dw_mci_start_request() argument 1031 host->cur_slot = slot; __dw_mci_start_request() 1032 host->mrq = mrq; __dw_mci_start_request() 1034 host->pending_events = 0; __dw_mci_start_request() 1035 host->completed_events = 0; __dw_mci_start_request() 1036 host->cmd_status = 0; __dw_mci_start_request() 1037 host->data_status = 0; __dw_mci_start_request() 1038 host->dir_status = 0; __dw_mci_start_request() 1042 mci_writel(host, TMOUT, 0xFFFFFFFF); __dw_mci_start_request() 1043 mci_writel(host, BYTCNT, data->blksz*data->blocks); __dw_mci_start_request() 1044 mci_writel(host, BLKSIZ, data->blksz); __dw_mci_start_request() 1054 dw_mci_submit_data(host, data); __dw_mci_start_request() 1058 dw_mci_start_command(host, cmd, cmdflags); __dw_mci_start_request() 1073 spin_lock_irqsave(&host->irq_lock, irqflags); __dw_mci_start_request() 1074 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) __dw_mci_start_request() 1075 mod_timer(&host->cmd11_timer, __dw_mci_start_request() 1077 spin_unlock_irqrestore(&host->irq_lock, irqflags); __dw_mci_start_request() 1081 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); __dw_mci_start_request() 1083 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); __dw_mci_start_request() 1086 static void dw_mci_start_request(struct dw_mci *host, dw_mci_start_request() argument 
1093 __dw_mci_start_request(host, slot, cmd); dw_mci_start_request() 1096 /* must be called with host->lock held */ dw_mci_queue_request() 1097 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, dw_mci_queue_request() argument 1101 host->state); dw_mci_queue_request() 1105 if (host->state == STATE_WAITING_CMD11_DONE) { dw_mci_queue_request() 1113 host->state = STATE_IDLE; dw_mci_queue_request() 1116 if (host->state == STATE_IDLE) { dw_mci_queue_request() 1117 host->state = STATE_SENDING_CMD; dw_mci_queue_request() 1118 dw_mci_start_request(host, slot); dw_mci_queue_request() 1120 list_add_tail(&slot->queue_node, &host->queue); dw_mci_queue_request() 1127 struct dw_mci *host = slot->host; dw_mci_request() local 1136 spin_lock_bh(&host->lock); dw_mci_request() 1139 spin_unlock_bh(&host->lock); dw_mci_request() 1145 dw_mci_queue_request(host, slot, mrq); dw_mci_request() 1147 spin_unlock_bh(&host->lock); dw_mci_request() 1153 const struct dw_mci_drv_data *drv_data = slot->host->drv_data; dw_mci_set_ios() 1169 regs = mci_readl(slot->host, UHS_REG); dw_mci_set_ios() 1178 mci_writel(slot->host, UHS_REG, regs); dw_mci_set_ios() 1179 slot->host->timing = ios->timing; dw_mci_set_ios() 1188 drv_data->set_ios(slot->host, ios); dw_mci_set_ios() 1196 dev_err(slot->host->dev, dw_mci_set_ios() 1203 regs = mci_readl(slot->host, PWREN); dw_mci_set_ios() 1205 mci_writel(slot->host, PWREN, regs); dw_mci_set_ios() 1208 if (!slot->host->vqmmc_enabled) { dw_mci_set_ios() 1212 dev_err(slot->host->dev, dw_mci_set_ios() 1215 slot->host->vqmmc_enabled = true; dw_mci_set_ios() 1219 slot->host->vqmmc_enabled = true; dw_mci_set_ios() 1223 dw_mci_ctrl_reset(slot->host, dw_mci_set_ios() 1238 if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) dw_mci_set_ios() 1240 slot->host->vqmmc_enabled = false; dw_mci_set_ios() 1242 regs = mci_readl(slot->host, PWREN); dw_mci_set_ios() 1244 mci_writel(slot->host, PWREN, regs); dw_mci_set_ios() 1250 if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0) dw_mci_set_ios() 1251 slot->host->state = STATE_IDLE; dw_mci_set_ios() 1263 status = mci_readl(slot->host, STATUS); dw_mci_card_busy() 1271 struct dw_mci *host = slot->host; dw_mci_switch_voltage() local 1282 uhs = mci_readl(host, UHS_REG); dw_mci_switch_voltage() 1302 mci_writel(host, UHS_REG, uhs); dw_mci_switch_voltage() 1315 (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT)) dw_mci_get_ro() 1321 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 
1 : 0; dw_mci_get_ro() 1333 struct dw_mci_board *brd = slot->host->pdata; dw_mci_get_cd() 1334 struct dw_mci *host = slot->host; dw_mci_get_cd() local 1344 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) dw_mci_get_cd() 1347 spin_lock_bh(&host->lock); dw_mci_get_cd() 1355 spin_unlock_bh(&host->lock); dw_mci_get_cd() 1363 struct dw_mci *host = slot->host; dw_mci_init_card() local 1375 clk_en_a_old = mci_readl(host, CLKENA); dw_mci_init_card() 1387 mci_writel(host, CLKENA, clk_en_a); dw_mci_init_card() 1397 struct dw_mci *host = slot->host; dw_mci_enable_sdio_irq() local 1401 spin_lock_irqsave(&host->irq_lock, irqflags); dw_mci_enable_sdio_irq() 1404 int_mask = mci_readl(host, INTMASK); dw_mci_enable_sdio_irq() 1409 mci_writel(host, INTMASK, int_mask); dw_mci_enable_sdio_irq() 1411 spin_unlock_irqrestore(&host->irq_lock, irqflags); dw_mci_enable_sdio_irq() 1417 struct dw_mci *host = slot->host; dw_mci_execute_tuning() local 1418 const struct dw_mci_drv_data *drv_data = host->drv_data; dw_mci_execute_tuning() 1429 struct dw_mci *host = slot->host; dw_mci_prepare_hs400_tuning() local 1430 const struct dw_mci_drv_data *drv_data = host->drv_data; dw_mci_prepare_hs400_tuning() 1433 return drv_data->prepare_hs400_tuning(host, ios); dw_mci_prepare_hs400_tuning() 1453 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) 1454 __releases(&host->lock) 1455 __acquires(&host->lock) 1458 struct mmc_host *prev_mmc = host->cur_slot->mmc; 1460 WARN_ON(host->cmd || host->data); 1462 host->cur_slot->mrq = NULL; 1463 host->mrq = NULL; 1464 if (!list_empty(&host->queue)) { 1465 slot = list_entry(host->queue.next, 1468 dev_vdbg(host->dev, "list not empty: %s is next\n", 1470 host->state = STATE_SENDING_CMD; 1471 dw_mci_start_request(host, slot); 1473 dev_vdbg(host->dev, "list empty\n"); 1475 if (host->state == STATE_SENDING_CMD11) 1476 host->state = STATE_WAITING_CMD11_DONE; 1478 host->state = STATE_IDLE; 1481 spin_unlock(&host->lock); 1483 spin_lock(&host->lock); 1486 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) dw_mci_command_complete() argument 1488 u32 status = host->cmd_status; dw_mci_command_complete() 1490 host->cmd_status = 0; dw_mci_command_complete() 1495 cmd->resp[3] = mci_readl(host, RESP0); dw_mci_command_complete() 1496 cmd->resp[2] = mci_readl(host, RESP1); dw_mci_command_complete() 1497 cmd->resp[1] = mci_readl(host, RESP2); dw_mci_command_complete() 1498 cmd->resp[0] = mci_readl(host, RESP3); dw_mci_command_complete() 1500 cmd->resp[0] = mci_readl(host, RESP0); dw_mci_command_complete() 1518 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY) dw_mci_command_complete() 1525 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data) dw_mci_data_complete() argument 1527 u32 status = host->data_status; dw_mci_data_complete() 1535 if (host->dir_status == dw_mci_data_complete() 1544 } else if (host->dir_status == dw_mci_data_complete() 1553 dev_dbg(host->dev, "data error, status 0x%08x\n", status); dw_mci_data_complete() 1559 dw_mci_reset(host); dw_mci_data_complete() 1570 struct dw_mci *host = (struct dw_mci *)priv; dw_mci_tasklet_func() local 1578 spin_lock(&host->lock); dw_mci_tasklet_func() 1580 state = host->state; dw_mci_tasklet_func() 1581 data = host->data; dw_mci_tasklet_func() 1582 mrq = host->mrq; dw_mci_tasklet_func() 1595 &host->pending_events)) dw_mci_tasklet_func() 1598 cmd = host->cmd; dw_mci_tasklet_func() 1599 host->cmd = NULL; dw_mci_tasklet_func() 1600 set_bit(EVENT_CMD_COMPLETE, 
&host->completed_events); dw_mci_tasklet_func() 1601 err = dw_mci_command_complete(host, cmd); dw_mci_tasklet_func() 1604 __dw_mci_start_request(host, host->cur_slot, dw_mci_tasklet_func() 1610 dw_mci_stop_dma(host); dw_mci_tasklet_func() 1611 send_stop_abort(host, data); dw_mci_tasklet_func() 1617 dw_mci_request_end(host, mrq); dw_mci_tasklet_func() 1634 &host->pending_events)) { dw_mci_tasklet_func() 1635 dw_mci_stop_dma(host); dw_mci_tasklet_func() 1637 !(host->data_status & (SDMMC_INT_DRTO | dw_mci_tasklet_func() 1639 send_stop_abort(host, data); dw_mci_tasklet_func() 1645 &host->pending_events)) dw_mci_tasklet_func() 1648 set_bit(EVENT_XFER_COMPLETE, &host->completed_events); dw_mci_tasklet_func() 1664 &host->pending_events)) { dw_mci_tasklet_func() 1665 dw_mci_stop_dma(host); dw_mci_tasklet_func() 1667 !(host->data_status & (SDMMC_INT_DRTO | dw_mci_tasklet_func() 1669 send_stop_abort(host, data); dw_mci_tasklet_func() 1679 &host->pending_events)) dw_mci_tasklet_func() 1682 host->data = NULL; dw_mci_tasklet_func() 1683 set_bit(EVENT_DATA_COMPLETE, &host->completed_events); dw_mci_tasklet_func() 1684 err = dw_mci_data_complete(host, data); dw_mci_tasklet_func() 1690 dw_mci_request_end(host, mrq); dw_mci_tasklet_func() 1696 send_stop_abort(host, data); dw_mci_tasklet_func() 1708 &host->pending_events)) { dw_mci_tasklet_func() 1709 host->cmd = NULL; dw_mci_tasklet_func() 1710 dw_mci_request_end(host, mrq); dw_mci_tasklet_func() 1725 &host->pending_events)) dw_mci_tasklet_func() 1730 dw_mci_reset(host); dw_mci_tasklet_func() 1732 host->cmd = NULL; dw_mci_tasklet_func() 1733 host->data = NULL; dw_mci_tasklet_func() 1736 dw_mci_command_complete(host, mrq->stop); dw_mci_tasklet_func() 1738 host->cmd_status = 0; dw_mci_tasklet_func() 1740 dw_mci_request_end(host, mrq); dw_mci_tasklet_func() 1745 &host->pending_events)) dw_mci_tasklet_func() 1753 host->state = state; dw_mci_tasklet_func() 1755 spin_unlock(&host->lock); dw_mci_tasklet_func() 1760 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) dw_mci_set_part_bytes() argument 1762 memcpy((void *)&host->part_buf, buf, cnt); dw_mci_set_part_bytes() 1763 host->part_buf_count = cnt; dw_mci_set_part_bytes() 1767 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) dw_mci_push_part_bytes() argument 1769 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); dw_mci_push_part_bytes() 1770 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); dw_mci_push_part_bytes() 1771 host->part_buf_count += cnt; dw_mci_push_part_bytes() 1776 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) dw_mci_pull_part_bytes() argument 1778 cnt = min(cnt, (int)host->part_buf_count); dw_mci_pull_part_bytes() 1780 memcpy(buf, (void *)&host->part_buf + host->part_buf_start, dw_mci_pull_part_bytes() 1782 host->part_buf_count -= cnt; dw_mci_pull_part_bytes() 1783 host->part_buf_start += cnt; dw_mci_pull_part_bytes() 1789 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) dw_mci_pull_final_bytes() argument 1791 memcpy(buf, &host->part_buf, cnt); dw_mci_pull_final_bytes() 1792 host->part_buf_start = cnt; dw_mci_pull_final_bytes() 1793 host->part_buf_count = (1 << host->data_shift) - cnt; dw_mci_pull_final_bytes() 1796 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) dw_mci_push_data16() argument 1798 struct mmc_data *data = host->data; dw_mci_push_data16() 1802 if (unlikely(host->part_buf_count)) { dw_mci_push_data16() 1803 int len = 
dw_mci_push_part_bytes(host, buf, cnt); dw_mci_push_data16() 1806 if (host->part_buf_count == 2) { dw_mci_push_data16() 1807 mci_fifo_writew(host->fifo_reg, host->part_buf16); dw_mci_push_data16() 1808 host->part_buf_count = 0; dw_mci_push_data16() 1824 mci_fifo_writew(host->fifo_reg, aligned_buf[i]); dw_mci_push_data16() 1831 mci_fifo_writew(host->fifo_reg, *pdata++); dw_mci_push_data16() 1836 dw_mci_set_part_bytes(host, buf, cnt); dw_mci_push_data16() 1840 mci_fifo_writew(host->fifo_reg, host->part_buf16); dw_mci_push_data16() 1844 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) dw_mci_pull_data16() argument 1855 aligned_buf[i] = mci_fifo_readw(host->fifo_reg); dw_mci_pull_data16() 1866 *pdata++ = mci_fifo_readw(host->fifo_reg); dw_mci_pull_data16() 1870 host->part_buf16 = mci_fifo_readw(host->fifo_reg); dw_mci_pull_data16() 1871 dw_mci_pull_final_bytes(host, buf, cnt); dw_mci_pull_data16() 1875 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) dw_mci_push_data32() argument 1877 struct mmc_data *data = host->data; dw_mci_push_data32() 1881 if (unlikely(host->part_buf_count)) { dw_mci_push_data32() 1882 int len = dw_mci_push_part_bytes(host, buf, cnt); dw_mci_push_data32() 1885 if (host->part_buf_count == 4) { dw_mci_push_data32() 1886 mci_fifo_writel(host->fifo_reg, host->part_buf32); dw_mci_push_data32() 1887 host->part_buf_count = 0; dw_mci_push_data32() 1903 mci_fifo_writel(host->fifo_reg, aligned_buf[i]); dw_mci_push_data32() 1910 mci_fifo_writel(host->fifo_reg, *pdata++); dw_mci_push_data32() 1915 dw_mci_set_part_bytes(host, buf, cnt); dw_mci_push_data32() 1919 mci_fifo_writel(host->fifo_reg, host->part_buf32); dw_mci_push_data32() 1923 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) dw_mci_pull_data32() argument 1934 aligned_buf[i] = mci_fifo_readl(host->fifo_reg); dw_mci_pull_data32() 1945 *pdata++ = mci_fifo_readl(host->fifo_reg); dw_mci_pull_data32() 1949 host->part_buf32 = mci_fifo_readl(host->fifo_reg); dw_mci_pull_data32() 1950 dw_mci_pull_final_bytes(host, buf, cnt); dw_mci_pull_data32() 1954 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) dw_mci_push_data64() argument 1956 struct mmc_data *data = host->data; dw_mci_push_data64() 1960 if (unlikely(host->part_buf_count)) { dw_mci_push_data64() 1961 int len = dw_mci_push_part_bytes(host, buf, cnt); dw_mci_push_data64() 1965 if (host->part_buf_count == 8) { dw_mci_push_data64() 1966 mci_fifo_writeq(host->fifo_reg, host->part_buf); dw_mci_push_data64() 1967 host->part_buf_count = 0; dw_mci_push_data64() 1983 mci_fifo_writeq(host->fifo_reg, aligned_buf[i]); dw_mci_push_data64() 1990 mci_fifo_writeq(host->fifo_reg, *pdata++); dw_mci_push_data64() 1995 dw_mci_set_part_bytes(host, buf, cnt); dw_mci_push_data64() 1999 mci_fifo_writeq(host->fifo_reg, host->part_buf); dw_mci_push_data64() 2003 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) dw_mci_pull_data64() argument 2014 aligned_buf[i] = mci_fifo_readq(host->fifo_reg); dw_mci_pull_data64() 2026 *pdata++ = mci_fifo_readq(host->fifo_reg); dw_mci_pull_data64() 2030 host->part_buf = mci_fifo_readq(host->fifo_reg); dw_mci_pull_data64() 2031 dw_mci_pull_final_bytes(host, buf, cnt); dw_mci_pull_data64() 2035 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) dw_mci_pull_data() argument 2040 len = dw_mci_pull_part_bytes(host, buf, cnt); dw_mci_pull_data() 2047 host->pull_data(host, buf, cnt); dw_mci_pull_data() 2050 static void dw_mci_read_data_pio(struct 
dw_mci *host, bool dto) dw_mci_read_data_pio() argument 2052 struct sg_mapping_iter *sg_miter = &host->sg_miter; dw_mci_read_data_pio() 2055 struct mmc_data *data = host->data; dw_mci_read_data_pio() 2056 int shift = host->data_shift; dw_mci_read_data_pio() 2065 host->sg = sg_miter->piter.sg; dw_mci_read_data_pio() 2071 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) dw_mci_read_data_pio() 2072 << shift) + host->part_buf_count; dw_mci_read_data_pio() 2076 dw_mci_pull_data(host, (void *)(buf + offset), len); dw_mci_read_data_pio() 2083 status = mci_readl(host, MINTSTS); dw_mci_read_data_pio() 2084 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); dw_mci_read_data_pio() 2087 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS)))); dw_mci_read_data_pio() 2099 host->sg = NULL; dw_mci_read_data_pio() 2101 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); dw_mci_read_data_pio() 2104 static void dw_mci_write_data_pio(struct dw_mci *host) dw_mci_write_data_pio() argument 2106 struct sg_mapping_iter *sg_miter = &host->sg_miter; dw_mci_write_data_pio() 2109 struct mmc_data *data = host->data; dw_mci_write_data_pio() 2110 int shift = host->data_shift; dw_mci_write_data_pio() 2113 unsigned int fifo_depth = host->fifo_depth; dw_mci_write_data_pio() 2120 host->sg = sg_miter->piter.sg; dw_mci_write_data_pio() 2127 SDMMC_GET_FCNT(mci_readl(host, STATUS))) dw_mci_write_data_pio() 2128 << shift) - host->part_buf_count; dw_mci_write_data_pio() 2132 host->push_data(host, (void *)(buf + offset), len); dw_mci_write_data_pio() 2139 status = mci_readl(host, MINTSTS); dw_mci_write_data_pio() 2140 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); dw_mci_write_data_pio() 2153 host->sg = NULL; dw_mci_write_data_pio() 2155 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); dw_mci_write_data_pio() 2158 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) dw_mci_cmd_interrupt() argument 2160 if (!host->cmd_status) dw_mci_cmd_interrupt() 2161 host->cmd_status = status; dw_mci_cmd_interrupt() 2165 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); dw_mci_cmd_interrupt() 2166 tasklet_schedule(&host->tasklet); dw_mci_cmd_interrupt() 2169 static void dw_mci_handle_cd(struct dw_mci *host) dw_mci_handle_cd() argument 2173 for (i = 0; i < host->num_slots; i++) { dw_mci_handle_cd() 2174 struct dw_mci_slot *slot = host->slot[i]; dw_mci_handle_cd() 2182 msecs_to_jiffies(host->pdata->detect_delay_ms)); dw_mci_handle_cd() 2188 struct dw_mci *host = dev_id; dw_mci_interrupt() local 2192 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ dw_mci_interrupt() 2198 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) { dw_mci_interrupt() 2200 ((mci_readl(host, STATUS) >> 17) & 0x1fff)) dw_mci_interrupt() 2206 if ((host->state == STATE_SENDING_CMD11) && dw_mci_interrupt() 2210 mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH); dw_mci_interrupt() 2217 spin_lock_irqsave(&host->irq_lock, irqflags); dw_mci_interrupt() 2218 dw_mci_cmd_interrupt(host, pending); dw_mci_interrupt() 2219 spin_unlock_irqrestore(&host->irq_lock, irqflags); dw_mci_interrupt() 2221 del_timer(&host->cmd11_timer); dw_mci_interrupt() 2225 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); dw_mci_interrupt() 2226 host->cmd_status = pending; dw_mci_interrupt() 2228 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); dw_mci_interrupt() 2233 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); dw_mci_interrupt() 2234 host->data_status = pending; dw_mci_interrupt() 2236 set_bit(EVENT_DATA_ERROR, &host->pending_events); dw_mci_interrupt() 2237 tasklet_schedule(&host->tasklet); 
dw_mci_interrupt() 2241 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); dw_mci_interrupt() 2242 if (!host->data_status) dw_mci_interrupt() 2243 host->data_status = pending; dw_mci_interrupt() 2245 if (host->dir_status == DW_MCI_RECV_STATUS) { dw_mci_interrupt() 2246 if (host->sg != NULL) dw_mci_interrupt() 2247 dw_mci_read_data_pio(host, true); dw_mci_interrupt() 2249 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); dw_mci_interrupt() 2250 tasklet_schedule(&host->tasklet); dw_mci_interrupt() 2254 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); dw_mci_interrupt() 2255 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) dw_mci_interrupt() 2256 dw_mci_read_data_pio(host, false); dw_mci_interrupt() 2260 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); dw_mci_interrupt() 2261 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) dw_mci_interrupt() 2262 dw_mci_write_data_pio(host); dw_mci_interrupt() 2266 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); dw_mci_interrupt() 2267 dw_mci_cmd_interrupt(host, pending); dw_mci_interrupt() 2271 mci_writel(host, RINTSTS, SDMMC_INT_CD); dw_mci_interrupt() 2272 dw_mci_handle_cd(host); dw_mci_interrupt() 2276 for (i = 0; i < host->num_slots; i++) { dw_mci_interrupt() 2277 struct dw_mci_slot *slot = host->slot[i]; dw_mci_interrupt() 2283 mci_writel(host, RINTSTS, dw_mci_interrupt() 2293 if (host->dma_64bit_address == 1) { dw_mci_interrupt() 2294 pending = mci_readl(host, IDSTS64); dw_mci_interrupt() 2296 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI | dw_mci_interrupt() 2298 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI); dw_mci_interrupt() 2299 host->dma_ops->complete(host); dw_mci_interrupt() 2302 pending = mci_readl(host, IDSTS); dw_mci_interrupt() 2304 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | dw_mci_interrupt() 2306 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); dw_mci_interrupt() 2307 host->dma_ops->complete(host); dw_mci_interrupt() 2369 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) dw_mci_init_slot() argument 2373 const struct dw_mci_drv_data *drv_data = host->drv_data; dw_mci_init_slot() 2377 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); dw_mci_init_slot() 2383 slot->sdio_id = host->sdio_id0 + id; dw_mci_init_slot() 2385 slot->host = host; dw_mci_init_slot() 2386 host->slot[id] = slot; dw_mci_init_slot() 2388 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id); dw_mci_init_slot() 2391 if (of_property_read_u32_array(host->dev->of_node, dw_mci_init_slot() 2408 if (host->pdata->caps) dw_mci_init_slot() 2409 mmc->caps = host->pdata->caps; dw_mci_init_slot() 2411 if (host->pdata->pm_caps) dw_mci_init_slot() 2412 mmc->pm_caps = host->pdata->pm_caps; dw_mci_init_slot() 2414 if (host->dev->of_node) { dw_mci_init_slot() 2415 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); dw_mci_init_slot() 2419 ctrl_id = to_platform_device(host->dev)->id; dw_mci_init_slot() 2424 if (host->pdata->caps2) dw_mci_init_slot() 2425 mmc->caps2 = host->pdata->caps2; dw_mci_init_slot() 2431 if (host->pdata->blk_settings) { dw_mci_init_slot() 2432 mmc->max_segs = host->pdata->blk_settings->max_segs; dw_mci_init_slot() 2433 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; dw_mci_init_slot() 2434 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count; dw_mci_init_slot() 2435 mmc->max_req_size = host->pdata->blk_settings->max_req_size; dw_mci_init_slot() 2436 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size; dw_mci_init_slot() 2440 mmc->max_segs = host->ring_size; dw_mci_init_slot() 2443 mmc->max_req_size = 
mmc->max_seg_size * host->ring_size; dw_mci_init_slot() 2478 slot->host->slot[id] = NULL; dw_mci_cleanup_slot() 2482 static void dw_mci_init_dma(struct dw_mci *host) dw_mci_init_dma() argument 2486 addr_config = (mci_readl(host, HCON) >> 27) & 0x01; dw_mci_init_dma() 2489 /* host supports IDMAC in 64-bit address mode */ dw_mci_init_dma() 2490 host->dma_64bit_address = 1; dw_mci_init_dma() 2491 dev_info(host->dev, "IDMAC supports 64-bit address mode.\n"); dw_mci_init_dma() 2492 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64))) dw_mci_init_dma() 2493 dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64)); dw_mci_init_dma() 2495 /* host supports IDMAC in 32-bit address mode */ dw_mci_init_dma() 2496 host->dma_64bit_address = 0; dw_mci_init_dma() 2497 dev_info(host->dev, "IDMAC supports 32-bit address mode.\n"); dw_mci_init_dma() 2501 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE, dw_mci_init_dma() 2502 &host->sg_dma, GFP_KERNEL); dw_mci_init_dma() 2503 if (!host->sg_cpu) { dw_mci_init_dma() 2504 dev_err(host->dev, "%s: could not alloc DMA memory\n", dw_mci_init_dma() 2511 host->dma_ops = &dw_mci_idmac_ops; dw_mci_init_dma() 2512 dev_info(host->dev, "Using internal DMA controller.\n"); dw_mci_init_dma() 2515 if (!host->dma_ops) dw_mci_init_dma() 2518 if (host->dma_ops->init && host->dma_ops->start && dw_mci_init_dma() 2519 host->dma_ops->stop && host->dma_ops->cleanup) { dw_mci_init_dma() 2520 if (host->dma_ops->init(host)) { dw_mci_init_dma() 2521 dev_err(host->dev, "%s: Unable to initialize " dw_mci_init_dma() 2526 dev_err(host->dev, "DMA initialization not found.\n"); dw_mci_init_dma() 2530 host->use_dma = 1; dw_mci_init_dma() 2534 dev_info(host->dev, "Using PIO mode.\n"); dw_mci_init_dma() 2535 host->use_dma = 0; dw_mci_init_dma() 2539 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) dw_mci_ctrl_reset() argument 2544 ctrl = mci_readl(host, CTRL); dw_mci_ctrl_reset() 2546 mci_writel(host, CTRL, ctrl); dw_mci_ctrl_reset() 2550 ctrl = mci_readl(host, CTRL); dw_mci_ctrl_reset() 2555 dev_err(host->dev, dw_mci_ctrl_reset() 2562 static bool dw_mci_reset(struct dw_mci *host) dw_mci_reset() argument 2571 if (host->sg) { dw_mci_reset() 2572 sg_miter_stop(&host->sg_miter); dw_mci_reset() 2573 host->sg = NULL; dw_mci_reset() 2576 if (host->use_dma) dw_mci_reset() 2579 if (dw_mci_ctrl_reset(host, flags)) { dw_mci_reset() 2584 mci_writel(host, RINTSTS, 0xFFFFFFFF); dw_mci_reset() 2587 if (host->use_dma) { dw_mci_reset() 2591 status = mci_readl(host, STATUS); dw_mci_reset() 2598 dev_err(host->dev, dw_mci_reset() 2605 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET)) dw_mci_reset() 2610 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) { dw_mci_reset() 2611 dev_err(host->dev, "%s: fifo/dma reset bits didn't " dw_mci_reset() 2620 dw_mci_idmac_reset(host); dw_mci_reset() 2627 mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0); dw_mci_reset() 2634 struct dw_mci *host = (struct dw_mci *)arg; dw_mci_cmd11_timer() local 2636 if (host->state != STATE_SENDING_CMD11) { dw_mci_cmd11_timer() 2637 dev_warn(host->dev, "Unexpected CMD11 timeout\n"); dw_mci_cmd11_timer() 2641 host->cmd_status = SDMMC_INT_RTO; dw_mci_cmd11_timer() 2642 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); dw_mci_cmd11_timer() 2643 tasklet_schedule(&host->tasklet); dw_mci_cmd11_timer() 2660 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) dw_mci_parse_dt() argument 2663 struct device *dev = host->dev; dw_mci_parse_dt() 2665 const struct dw_mci_drv_data *drv_data = host->drv_data; dw_mci_parse_dt() 2696 ret 
= drv_data->parse_dt(host); dw_mci_parse_dt() 2708 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) dw_mci_parse_dt() argument 2714 static void dw_mci_enable_cd(struct dw_mci *host) dw_mci_enable_cd() argument 2716 struct dw_mci_board *brd = host->pdata; dw_mci_enable_cd() 2726 for (i = 0; i < host->num_slots; i++) { dw_mci_enable_cd() 2727 struct dw_mci_slot *slot = host->slot[i]; dw_mci_enable_cd() 2732 if (i == host->num_slots) dw_mci_enable_cd() 2735 spin_lock_irqsave(&host->irq_lock, irqflags); dw_mci_enable_cd() 2736 temp = mci_readl(host, INTMASK); dw_mci_enable_cd() 2738 mci_writel(host, INTMASK, temp); dw_mci_enable_cd() 2739 spin_unlock_irqrestore(&host->irq_lock, irqflags); dw_mci_enable_cd() 2742 int dw_mci_probe(struct dw_mci *host) dw_mci_probe() argument 2744 const struct dw_mci_drv_data *drv_data = host->drv_data; dw_mci_probe() 2749 if (!host->pdata) { dw_mci_probe() 2750 host->pdata = dw_mci_parse_dt(host); dw_mci_probe() 2751 if (IS_ERR(host->pdata)) { dw_mci_probe() 2752 dev_err(host->dev, "platform data not available\n"); dw_mci_probe() 2757 if (host->pdata->num_slots > 1) { dw_mci_probe() 2758 dev_err(host->dev, dw_mci_probe() 2763 host->biu_clk = devm_clk_get(host->dev, "biu"); dw_mci_probe() 2764 if (IS_ERR(host->biu_clk)) { dw_mci_probe() 2765 dev_dbg(host->dev, "biu clock not available\n"); dw_mci_probe() 2767 ret = clk_prepare_enable(host->biu_clk); dw_mci_probe() 2769 dev_err(host->dev, "failed to enable biu clock\n"); dw_mci_probe() 2774 host->ciu_clk = devm_clk_get(host->dev, "ciu"); dw_mci_probe() 2775 if (IS_ERR(host->ciu_clk)) { dw_mci_probe() 2776 dev_dbg(host->dev, "ciu clock not available\n"); dw_mci_probe() 2777 host->bus_hz = host->pdata->bus_hz; dw_mci_probe() 2779 ret = clk_prepare_enable(host->ciu_clk); dw_mci_probe() 2781 dev_err(host->dev, "failed to enable ciu clock\n"); dw_mci_probe() 2785 if (host->pdata->bus_hz) { dw_mci_probe() 2786 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz); dw_mci_probe() 2788 dev_warn(host->dev, dw_mci_probe() 2790 host->pdata->bus_hz); dw_mci_probe() 2792 host->bus_hz = clk_get_rate(host->ciu_clk); dw_mci_probe() 2795 if (!host->bus_hz) { dw_mci_probe() 2796 dev_err(host->dev, dw_mci_probe() 2803 ret = drv_data->init(host); dw_mci_probe() 2805 dev_err(host->dev, dw_mci_probe() 2812 ret = drv_data->setup_clock(host); dw_mci_probe() 2814 dev_err(host->dev, dw_mci_probe() 2820 setup_timer(&host->cmd11_timer, dw_mci_probe() 2821 dw_mci_cmd11_timer, (unsigned long)host); dw_mci_probe() 2823 host->quirks = host->pdata->quirks; dw_mci_probe() 2825 spin_lock_init(&host->lock); dw_mci_probe() 2826 spin_lock_init(&host->irq_lock); dw_mci_probe() 2827 INIT_LIST_HEAD(&host->queue); dw_mci_probe() 2830 * Get the host data width - this assumes that HCON has been set with dw_mci_probe() 2833 i = (mci_readl(host, HCON) >> 7) & 0x7; dw_mci_probe() 2835 host->push_data = dw_mci_push_data16; dw_mci_probe() 2836 host->pull_data = dw_mci_pull_data16; dw_mci_probe() 2838 host->data_shift = 1; dw_mci_probe() 2840 host->push_data = dw_mci_push_data64; dw_mci_probe() 2841 host->pull_data = dw_mci_pull_data64; dw_mci_probe() 2843 host->data_shift = 3; dw_mci_probe() 2847 "HCON reports a reserved host data width!\n" dw_mci_probe() 2849 host->push_data = dw_mci_push_data32; dw_mci_probe() 2850 host->pull_data = dw_mci_pull_data32; dw_mci_probe() 2852 host->data_shift = 2; dw_mci_probe() 2856 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) dw_mci_probe() 2859 host->dma_ops = host->pdata->dma_ops; dw_mci_probe() 
2860 dw_mci_init_dma(host); dw_mci_probe() 2862 /* Clear the interrupts for the host controller */ dw_mci_probe() 2863 mci_writel(host, RINTSTS, 0xFFFFFFFF); dw_mci_probe() 2864 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ dw_mci_probe() 2867 mci_writel(host, TMOUT, 0xFFFFFFFF); dw_mci_probe() 2873 if (!host->pdata->fifo_depth) { dw_mci_probe() 2880 fifo_size = mci_readl(host, FIFOTH); dw_mci_probe() 2883 fifo_size = host->pdata->fifo_depth; dw_mci_probe() 2885 host->fifo_depth = fifo_size; dw_mci_probe() 2886 host->fifoth_val = dw_mci_probe() 2888 mci_writel(host, FIFOTH, host->fifoth_val); dw_mci_probe() 2891 mci_writel(host, CLKENA, 0); dw_mci_probe() 2892 mci_writel(host, CLKSRC, 0); dw_mci_probe() 2898 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); dw_mci_probe() 2899 dev_info(host->dev, "Version ID is %04x\n", host->verid); dw_mci_probe() 2901 if (host->verid < DW_MMC_240A) dw_mci_probe() 2902 host->fifo_reg = host->regs + DATA_OFFSET; dw_mci_probe() 2904 host->fifo_reg = host->regs + DATA_240A_OFFSET; dw_mci_probe() 2906 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); dw_mci_probe() 2907 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, dw_mci_probe() 2908 host->irq_flags, "dw-mci", host); dw_mci_probe() 2912 if (host->pdata->num_slots) dw_mci_probe() 2913 host->num_slots = host->pdata->num_slots; dw_mci_probe() 2915 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1; dw_mci_probe() 2921 mci_writel(host, RINTSTS, 0xFFFFFFFF); dw_mci_probe() 2922 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | dw_mci_probe() 2925 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ dw_mci_probe() 2927 dev_info(host->dev, "DW MMC controller at irq %d, " dw_mci_probe() 2928 "%d bit host data width, " dw_mci_probe() 2930 host->irq, width, fifo_size); dw_mci_probe() 2933 for (i = 0; i < host->num_slots; i++) { dw_mci_probe() 2934 ret = dw_mci_init_slot(host, i); dw_mci_probe() 2936 dev_dbg(host->dev, "slot %d init failed\n", i); dw_mci_probe() 2942 dev_info(host->dev, "%d slots initialized\n", init_slots); dw_mci_probe() 2944 dev_dbg(host->dev, "attempted to initialize %d slots, " dw_mci_probe() 2945 "but failed on all\n", host->num_slots); dw_mci_probe() 2950 dw_mci_enable_cd(host); dw_mci_probe() 2952 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) dw_mci_probe() 2953 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n"); dw_mci_probe() 2958 if (host->use_dma && host->dma_ops->exit) dw_mci_probe() 2959 host->dma_ops->exit(host); dw_mci_probe() 2962 if (!IS_ERR(host->ciu_clk)) dw_mci_probe() 2963 clk_disable_unprepare(host->ciu_clk); dw_mci_probe() 2966 if (!IS_ERR(host->biu_clk)) dw_mci_probe() 2967 clk_disable_unprepare(host->biu_clk); dw_mci_probe() 2973 void dw_mci_remove(struct dw_mci *host) dw_mci_remove() argument 2977 mci_writel(host, RINTSTS, 0xFFFFFFFF); dw_mci_remove() 2978 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ dw_mci_remove() 2980 for (i = 0; i < host->num_slots; i++) { dw_mci_remove() 2981 dev_dbg(host->dev, "remove slot %d\n", i); dw_mci_remove() 2982 if (host->slot[i]) dw_mci_remove() 2983 dw_mci_cleanup_slot(host->slot[i], i); dw_mci_remove() 2987 mci_writel(host, CLKENA, 0); dw_mci_remove() 2988 mci_writel(host, CLKSRC, 0); dw_mci_remove() 2990 if (host->use_dma && host->dma_ops->exit) dw_mci_remove() 2991 host->dma_ops->exit(host); dw_mci_remove() 2993 if (!IS_ERR(host->ciu_clk)) dw_mci_remove() 2994 
clk_disable_unprepare(host->ciu_clk); dw_mci_remove() 2996 if (!IS_ERR(host->biu_clk)) dw_mci_remove() 2997 clk_disable_unprepare(host->biu_clk); dw_mci_remove() 3007 int dw_mci_suspend(struct dw_mci *host) dw_mci_suspend() argument 3013 int dw_mci_resume(struct dw_mci *host) dw_mci_resume() argument 3017 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { dw_mci_resume() 3022 if (host->use_dma && host->dma_ops->init) dw_mci_resume() 3023 host->dma_ops->init(host); dw_mci_resume() 3029 mci_writel(host, FIFOTH, host->fifoth_val); dw_mci_resume() 3030 host->prev_blksz = 0; dw_mci_resume() 3033 mci_writel(host, TMOUT, 0xFFFFFFFF); dw_mci_resume() 3035 mci_writel(host, RINTSTS, 0xFFFFFFFF); dw_mci_resume() 3036 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | dw_mci_resume() 3039 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); dw_mci_resume() 3041 for (i = 0; i < host->num_slots; i++) { dw_mci_resume() 3042 struct dw_mci_slot *slot = host->slot[i]; dw_mci_resume() 3052 dw_mci_enable_cd(host); dw_mci_resume()
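The CLKDIV arithmetic in dw_mci_setup_bus() above is easy to lose in the flattened listing, so here is a minimal stand-alone sketch of the same math. It is not kernel code: the bus_hz value and the requested rates are made-up examples, and dw_clkdiv() is a hypothetical helper that only mirrors the rounding shown in the driver (the CLKDIV register holds half the divider, with 0 meaning the clock is passed through undivided).

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Compute the CLKDIV register value the way dw_mci_setup_bus() does. */
static unsigned int dw_clkdiv(unsigned int bus_hz, unsigned int clock)
{
	unsigned int div = bus_hz / clock;

	/* Round the divider up so the card clock never exceeds the request. */
	if (bus_hz % clock && bus_hz > clock)
		div += 1;

	/* CLKDIV stores half the divider; 0 means "do not divide". */
	return (bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
}

int main(void)
{
	unsigned int bus_hz = 100000000;	/* hypothetical CIU rate in Hz */
	unsigned int requests[] = { 400000, 25000000, 50000000 };

	for (unsigned int i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
		unsigned int div = dw_clkdiv(bus_hz, requests[i]);
		unsigned int actual = div ? (bus_hz / div) >> 1 : bus_hz;

		printf("requested %9u Hz -> CLKDIV %3u, actual %9u Hz\n",
		       requests[i], div, actual);
	}
	return 0;
}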
H A D | sdhci_f_sdh30.c | 2 * linux/drivers/mmc/host/sdhci_f_sdh30.c 52 void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host) sdhci_f_sdh30_soft_voltage_switch() argument 54 struct f_sdhost_priv *priv = sdhci_priv(host); sdhci_f_sdh30_soft_voltage_switch() 58 ctrl = sdhci_readl(host, F_SDH30_IO_CONTROL2); sdhci_f_sdh30_soft_voltage_switch() 60 sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2); sdhci_f_sdh30_soft_voltage_switch() 62 sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2); sdhci_f_sdh30_soft_voltage_switch() 65 sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2); sdhci_f_sdh30_soft_voltage_switch() 70 ctrl = sdhci_readl(host, F_SDH30_ESD_CONTROL); sdhci_f_sdh30_soft_voltage_switch() 72 sdhci_writel(host, ctrl, F_SDH30_ESD_CONTROL); sdhci_f_sdh30_soft_voltage_switch() 75 ctrl = sdhci_readl(host, F_SDH30_TUNING_SETTING); sdhci_f_sdh30_soft_voltage_switch() 77 sdhci_writel(host, ctrl, F_SDH30_TUNING_SETTING); sdhci_f_sdh30_soft_voltage_switch() 80 unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host) sdhci_f_sdh30_get_min_clock() argument 85 void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask) sdhci_f_sdh30_reset() argument 87 if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0) sdhci_f_sdh30_reset() 88 sdhci_writew(host, 0xBC01, SDHCI_CLOCK_CONTROL); sdhci_f_sdh30_reset() 90 sdhci_reset(host, mask); sdhci_f_sdh30_reset() 104 struct sdhci_host *host; sdhci_f_sdh30_probe() local 117 host = sdhci_alloc_host(dev, sizeof(struct sdhci_host) + sdhci_f_sdh30_probe() 119 if (IS_ERR(host)) sdhci_f_sdh30_probe() 120 return PTR_ERR(host); sdhci_f_sdh30_probe() 122 priv = sdhci_priv(host); sdhci_f_sdh30_probe() 125 host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | sdhci_f_sdh30_probe() 127 host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE | sdhci_f_sdh30_probe() 130 ret = mmc_of_parse(host->mmc); sdhci_f_sdh30_probe() 134 platform_set_drvdata(pdev, host); sdhci_f_sdh30_probe() 137 host->hw_name = "f_sdh30"; sdhci_f_sdh30_probe() 138 host->ops = &sdhci_f_sdh30_ops; sdhci_f_sdh30_probe() 139 host->irq = irq; sdhci_f_sdh30_probe() 142 host->ioaddr = devm_ioremap_resource(&pdev->dev, res); sdhci_f_sdh30_probe() 143 if (IS_ERR(host->ioaddr)) { sdhci_f_sdh30_probe() 144 ret = PTR_ERR(host->ioaddr); sdhci_f_sdh30_probe() 169 ctrl = sdhci_readw(host, F_SDH30_AHB_CONFIG); sdhci_f_sdh30_probe() 173 sdhci_writew(host, ctrl, F_SDH30_AHB_CONFIG); sdhci_f_sdh30_probe() 175 reg = sdhci_readl(host, F_SDH30_ESD_CONTROL); sdhci_f_sdh30_probe() 176 sdhci_writel(host, reg & ~F_SDH30_EMMC_RST, F_SDH30_ESD_CONTROL); sdhci_f_sdh30_probe() 178 sdhci_writel(host, reg | F_SDH30_EMMC_RST, F_SDH30_ESD_CONTROL); sdhci_f_sdh30_probe() 180 reg = sdhci_readl(host, SDHCI_CAPABILITIES); sdhci_f_sdh30_probe() 184 ret = sdhci_add_host(host); sdhci_f_sdh30_probe() 195 sdhci_free_host(host); sdhci_f_sdh30_probe() 201 struct sdhci_host *host = platform_get_drvdata(pdev); sdhci_f_sdh30_remove() local 202 struct f_sdhost_priv *priv = sdhci_priv(host); sdhci_f_sdh30_remove() 204 sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) == sdhci_f_sdh30_remove() 210 sdhci_free_host(host); sdhci_f_sdh30_remove()
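sdhci_f_sdh30_reset() above encodes a small controller quirk: the reset is only taken while SDHCI_CLOCK_CONTROL is non-zero, so the driver first writes 0xBC01 when that register reads back as 0 and only then calls the generic sdhci_reset(). The fragment below is an illustrative user-space rendering of that pattern, not driver code: the fake_mmio array and the rd16()/wr16() helpers stand in for the ioremap()ed register window, and only the register offset and the 0xBC01 value are taken from the listing.

#include <stdint.h>
#include <stdio.h>

#define SDHCI_CLOCK_CONTROL	0x2C	/* standard SDHCI register offset */

static uint16_t fake_mmio[0x100 / 2];	/* stand-in for the mapped registers */

static uint16_t rd16(unsigned int off)               { return fake_mmio[off / 2]; }
static void     wr16(uint16_t val, unsigned int off) { fake_mmio[off / 2] = val; }

static void f_sdh30_reset(uint8_t mask)
{
	/* The controller only accepts a reset while its clock is running. */
	if (rd16(SDHCI_CLOCK_CONTROL) == 0)
		wr16(0xBC01, SDHCI_CLOCK_CONTROL);

	printf("reset mask 0x%02x issued with CLOCK_CONTROL=0x%04x\n",
	       mask, rd16(SDHCI_CLOCK_CONTROL));
	/* ...the real driver hands off to sdhci_reset(host, mask) here... */
}

int main(void)
{
	f_sdh30_reset(0x01);	/* register reads 0, so the workaround fires */
	return 0;
}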
H A D | android-goldfish.c | 34 #include <linux/mmc/host.h> 58 #define GOLDFISH_MMC_READ(host, addr) (readl(host->reg_base + addr)) 59 #define GOLDFISH_MMC_WRITE(host, addr, x) (writel(x, host->reg_base + addr)) 135 goldfish_mmc_cover_is_open(struct goldfish_mmc_host *host) goldfish_mmc_cover_is_open() argument 144 struct goldfish_mmc_host *host = dev_get_drvdata(dev); goldfish_mmc_show_cover_switch() local 146 return sprintf(buf, "%s\n", goldfish_mmc_cover_is_open(host) ? "open" : goldfish_mmc_show_cover_switch() 153 goldfish_mmc_start_command(struct goldfish_mmc_host *host, struct mmc_command *cmd) goldfish_mmc_start_command() argument 159 host->cmd = cmd; goldfish_mmc_start_command() 180 dev_err(mmc_dev(host->mmc), goldfish_mmc_start_command() 196 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN) goldfish_mmc_start_command() 202 if (host->data && !(host->data->flags & MMC_DATA_WRITE)) goldfish_mmc_start_command() 205 GOLDFISH_MMC_WRITE(host, MMC_ARG, cmd->arg); goldfish_mmc_start_command() 206 GOLDFISH_MMC_WRITE(host, MMC_CMD, cmdreg); goldfish_mmc_start_command() 209 static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host, goldfish_mmc_xfer_done() argument 212 if (host->dma_in_use) { goldfish_mmc_xfer_done() 226 memcpy(dest, host->virt_base, data->sg->length); goldfish_mmc_xfer_done() 228 host->data->bytes_xfered += data->sg->length; goldfish_mmc_xfer_done() 229 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len, goldfish_mmc_xfer_done() 233 host->data = NULL; goldfish_mmc_xfer_done() 234 host->sg_len = 0; goldfish_mmc_xfer_done() 243 host->mrq = NULL; goldfish_mmc_xfer_done() 244 mmc_request_done(host->mmc, data->mrq); goldfish_mmc_xfer_done() 248 goldfish_mmc_start_command(host, data->stop); goldfish_mmc_xfer_done() 251 static void goldfish_mmc_end_of_data(struct goldfish_mmc_host *host, goldfish_mmc_end_of_data() argument 254 if (!host->dma_in_use) { goldfish_mmc_end_of_data() 255 goldfish_mmc_xfer_done(host, data); goldfish_mmc_end_of_data() 258 if (host->dma_done) goldfish_mmc_end_of_data() 259 goldfish_mmc_xfer_done(host, data); goldfish_mmc_end_of_data() 262 static void goldfish_mmc_cmd_done(struct goldfish_mmc_host *host, goldfish_mmc_cmd_done() argument 265 host->cmd = NULL; goldfish_mmc_cmd_done() 270 GOLDFISH_MMC_READ(host, MMC_RESP_0); goldfish_mmc_cmd_done() 272 GOLDFISH_MMC_READ(host, MMC_RESP_1); goldfish_mmc_cmd_done() 274 GOLDFISH_MMC_READ(host, MMC_RESP_2); goldfish_mmc_cmd_done() 276 GOLDFISH_MMC_READ(host, MMC_RESP_3); goldfish_mmc_cmd_done() 280 GOLDFISH_MMC_READ(host, MMC_RESP_0); goldfish_mmc_cmd_done() 284 if (host->data == NULL || cmd->error) { goldfish_mmc_cmd_done() 285 host->mrq = NULL; goldfish_mmc_cmd_done() 286 mmc_request_done(host->mmc, cmd->mrq); goldfish_mmc_cmd_done() 292 struct goldfish_mmc_host *host = (struct goldfish_mmc_host *)dev_id; goldfish_mmc_irq() local 300 while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) { goldfish_mmc_irq() 301 GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status); goldfish_mmc_irq() 319 struct mmc_request *mrq = host->mrq; goldfish_mmc_irq() 321 host->mrq = NULL; goldfish_mmc_irq() 322 mmc_request_done(host->mmc, mrq); goldfish_mmc_irq() 326 goldfish_mmc_cmd_done(host, host->cmd); goldfish_mmc_irq() 329 goldfish_mmc_xfer_done(host, host->data); goldfish_mmc_irq() 331 host->dma_done = 1; goldfish_mmc_irq() 332 goldfish_mmc_end_of_data(host, host->data); goldfish_mmc_irq() 333 } else if (host->data != NULL) { goldfish_mmc_irq() 336 * during device initialization, cases where host->data is 
goldfish_mmc_irq() 342 host->dma_done = 1; goldfish_mmc_irq() 343 goldfish_mmc_end_of_data(host, host->data); goldfish_mmc_irq() 347 u32 state = GOLDFISH_MMC_READ(host, MMC_STATE); goldfish_mmc_irq() 350 mmc_detect_change(host->mmc, 0); goldfish_mmc_irq() 355 status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS); goldfish_mmc_irq() 356 dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status); goldfish_mmc_irq() 358 GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status); goldfish_mmc_irq() 359 GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0); goldfish_mmc_irq() 366 static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host, goldfish_mmc_prepare_data() argument 374 host->data = data; goldfish_mmc_prepare_data() 376 GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, 0); goldfish_mmc_prepare_data() 377 GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, 0); goldfish_mmc_prepare_data() 378 host->dma_in_use = 0; goldfish_mmc_prepare_data() 384 GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, data->blocks - 1); goldfish_mmc_prepare_data() 385 GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, block_size - 1); goldfish_mmc_prepare_data() 398 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, goldfish_mmc_prepare_data() 400 host->dma_done = 0; goldfish_mmc_prepare_data() 401 host->dma_in_use = 1; goldfish_mmc_prepare_data() 409 memcpy(host->virt_base, src, data->sg->length); goldfish_mmc_prepare_data() 415 struct goldfish_mmc_host *host = mmc_priv(mmc); goldfish_mmc_request() local 417 WARN_ON(host->mrq != NULL); goldfish_mmc_request() 419 host->mrq = req; goldfish_mmc_request() 420 goldfish_mmc_prepare_data(host, req); goldfish_mmc_request() 421 goldfish_mmc_start_command(host, req->cmd); goldfish_mmc_request() 434 struct goldfish_mmc_host *host = mmc_priv(mmc); goldfish_mmc_set_ios() local 436 host->bus_mode = ios->bus_mode; goldfish_mmc_set_ios() 437 host->hw_bus_mode = host->bus_mode; goldfish_mmc_set_ios() 443 struct goldfish_mmc_host *host = mmc_priv(mmc); goldfish_mmc_get_ro() local 445 state = GOLDFISH_MMC_READ(host, MMC_STATE); goldfish_mmc_get_ro() 458 struct goldfish_mmc_host *host = NULL; goldfish_mmc_probe() local 475 host = mmc_priv(mmc); goldfish_mmc_probe() 476 host->mmc = mmc; goldfish_mmc_probe() 479 host->reg_base = ioremap(res->start, resource_size(res)); goldfish_mmc_probe() 480 if (host->reg_base == NULL) { goldfish_mmc_probe() 484 host->virt_base = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE, goldfish_mmc_probe() 487 if (host->virt_base == 0) { goldfish_mmc_probe() 491 host->phys_base = buf_addr; goldfish_mmc_probe() 493 host->id = pdev->id; goldfish_mmc_probe() 494 host->irq = irq; goldfish_mmc_probe() 512 ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host); goldfish_mmc_probe() 518 host->dev = &pdev->dev; goldfish_mmc_probe() 519 platform_set_drvdata(pdev, host); goldfish_mmc_probe() 523 dev_warn(mmc_dev(host->mmc), goldfish_mmc_probe() 526 GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base); goldfish_mmc_probe() 527 GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, goldfish_mmc_probe() 535 dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, goldfish_mmc_probe() 536 host->phys_base); goldfish_mmc_probe() 538 iounmap(host->reg_base); goldfish_mmc_probe() 540 mmc_free_host(host->mmc); goldfish_mmc_probe() 547 struct goldfish_mmc_host *host = platform_get_drvdata(pdev); goldfish_mmc_remove() local 549 BUG_ON(host == NULL); goldfish_mmc_remove() 551 mmc_remove_host(host->mmc); goldfish_mmc_remove() 552 free_irq(host->irq, host); goldfish_mmc_remove() 553 dma_free_coherent(&pdev->dev, 
BUFFER_SIZE, host->virt_base, host->phys_base); goldfish_mmc_remove() 554 iounmap(host->reg_base); goldfish_mmc_remove() 555 mmc_free_host(host->mmc); goldfish_mmc_remove()
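goldfish_mmc_prepare_data() and goldfish_mmc_xfer_done() above implement a single bounce buffer: write data is memcpy()ed into one dma_alloc_coherent() buffer before the transfer, and read data is memcpy()ed back out of it afterwards. The sketch below reproduces only that copy discipline in ordinary user-space C; struct fake_host, prepare_write() and complete_read() are invented stand-ins for the example, not the driver's real interfaces.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUFFER_SIZE 512		/* one block, as in the driver's bounce buffer */

struct fake_host {
	void *virt_base;	/* stand-in for the coherent DMA buffer */
};

/* Write path: stage the request data where the "device" will fetch it. */
static void prepare_write(struct fake_host *host, const void *src, size_t len)
{
	memcpy(host->virt_base, src, len);
}

/* Read path: once the "device" has filled the buffer, copy it back out. */
static void complete_read(struct fake_host *host, void *dst, size_t len)
{
	memcpy(dst, host->virt_base, len);
}

int main(void)
{
	struct fake_host host = { .virt_base = malloc(BUFFER_SIZE) };
	char block[BUFFER_SIZE] = "hello, goldfish";
	char out[BUFFER_SIZE] = { 0 };

	prepare_write(&host, block, sizeof(block));
	/* ...the emulated controller would move virt_base to the card here... */
	complete_read(&host, out, sizeof(out));

	printf("round-tripped block: %s\n", out);
	free(host.virt_base);
	return 0;
}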
H A D | mvsdio.c | 26 #include <linux/mmc/host.h> 62 static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data) mvsd_setup_data() argument 64 void __iomem *iobase = host->base; mvsd_setup_data() 83 dev_warn(host->dev, "FIFO_EMPTY bit missing\n"); mvsd_setup_data() 88 dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit " mvsd_setup_data() 94 tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk); mvsd_setup_data() 102 dev_dbg(host->dev, "data %s at 0x%08x: blocks=%d blksz=%d tmout=%u (%d)\n", mvsd_setup_data() 107 host->ctrl &= ~MVSD_HOST_CTRL_TMOUT_MASK; mvsd_setup_data() 108 host->ctrl |= MVSD_HOST_CTRL_TMOUT(tmout_index); mvsd_setup_data() 109 mvsd_write(MVSD_HOST_CTRL, host->ctrl); mvsd_setup_data() 119 * It also appears the host to card DMA can corrupt mvsd_setup_data() 123 host->pio_size = data->blocks * data->blksz; mvsd_setup_data() 124 host->pio_ptr = sg_virt(data->sg); mvsd_setup_data() 126 dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n", mvsd_setup_data() 127 host->pio_ptr, host->pio_size); mvsd_setup_data() 133 host->sg_frags = dma_map_sg(mmc_dev(host->mmc), data->sg, mvsd_setup_data() 144 struct mvsd_host *host = mmc_priv(mmc); mvsd_request() local 145 void __iomem *iobase = host->base; mvsd_request() 150 BUG_ON(host->mrq != NULL); mvsd_request() 151 host->mrq = mrq; mvsd_request() 153 dev_dbg(host->dev, "cmd %d (hw state 0x%04x)\n", mvsd_request() 187 pio = mvsd_setup_data(host, data); mvsd_request() 193 else if (host->pio_size > 32) mvsd_request() 225 spin_lock_irqsave(&host->lock, flags); mvsd_request() 227 host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN; mvsd_request() 228 host->xfer_mode |= xfer; mvsd_request() 229 mvsd_write(MVSD_XFER_MODE, host->xfer_mode); mvsd_request() 235 host->intr_en &= MVSD_NOR_CARD_INT; mvsd_request() 236 host->intr_en |= intr | MVSD_NOR_ERROR; mvsd_request() 237 mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); mvsd_request() 240 mod_timer(&host->timer, jiffies + 5 * HZ); mvsd_request() 242 spin_unlock_irqrestore(&host->lock, flags); mvsd_request() 245 static u32 mvsd_finish_cmd(struct mvsd_host *host, struct mmc_command *cmd, mvsd_finish_cmd() argument 248 void __iomem *iobase = host->base; mvsd_finish_cmd() 290 static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data, mvsd_finish_data() argument 293 void __iomem *iobase = host->base; mvsd_finish_data() 295 if (host->pio_ptr) { mvsd_finish_data() 296 host->pio_ptr = NULL; mvsd_finish_data() 297 host->pio_size = 0; mvsd_finish_data() 299 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags, mvsd_finish_data() 313 dev_dbg(host->dev, "data done: blocks_left=%d, bytes_left=%d\n", mvsd_finish_data() 335 dev_dbg(host->dev, "c12err 0x%04x\n", err_cmd12); mvsd_finish_data() 351 struct mvsd_host *host = dev; mvsd_irq() local 352 void __iomem *iobase = host->base; mvsd_irq() 357 dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 0x%04x\n", mvsd_irq() 367 dev_dbg(host->dev, "spurious irq detected intr 0x%04x intr_en 0x%04x erri 0x%04x erri_en 0x%04x\n", mvsd_irq() 375 spin_lock(&host->lock); mvsd_irq() 378 if (host->pio_size && mvsd_irq() 379 (intr_status & host->intr_en & mvsd_irq() 381 u16 *p = host->pio_ptr; mvsd_irq() 382 int s = host->pio_size; mvsd_irq() 410 host->intr_en &= mvsd_irq() 412 mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); mvsd_irq() 413 } else if (host->intr_en & MVSD_NOR_RX_FIFO_8W) { mvsd_irq() 414 host->intr_en &= ~MVSD_NOR_RX_FIFO_8W; mvsd_irq() 415 host->intr_en |= MVSD_NOR_RX_READY; mvsd_irq() 416 mvsd_write(MVSD_NOR_INTR_EN, 
host->intr_en); mvsd_irq() 419 dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n", mvsd_irq() 421 host->pio_ptr = p; mvsd_irq() 422 host->pio_size = s; mvsd_irq() 424 } else if (host->pio_size && mvsd_irq() 425 (intr_status & host->intr_en & mvsd_irq() 427 u16 *p = host->pio_ptr; mvsd_irq() 428 int s = host->pio_size; mvsd_irq() 451 host->intr_en &= mvsd_irq() 453 mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); mvsd_irq() 456 dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n", mvsd_irq() 458 host->pio_ptr = p; mvsd_irq() 459 host->pio_size = s; mvsd_irq() 467 if (intr_status & host->intr_en & ~intr_done_mask) { mvsd_irq() 468 struct mmc_request *mrq = host->mrq; mvsd_irq() 472 del_timer(&host->timer); mvsd_irq() 473 host->mrq = NULL; mvsd_irq() 475 host->intr_en &= MVSD_NOR_CARD_INT; mvsd_irq() 476 mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); mvsd_irq() 479 spin_unlock(&host->lock); mvsd_irq() 485 dev_dbg(host->dev, "err 0x%04x\n", err_status); mvsd_irq() 488 err_status = mvsd_finish_cmd(host, cmd, err_status); mvsd_irq() 490 err_status = mvsd_finish_data(host, mrq->data, err_status); mvsd_irq() 492 dev_err(host->dev, "unhandled error status %#04x\n", mvsd_irq() 497 mmc_request_done(host->mmc, mrq); mvsd_irq() 500 spin_unlock(&host->lock); mvsd_irq() 503 mmc_signal_sdio_irq(host->mmc); mvsd_irq() 510 dev_err(host->dev, "unhandled interrupt status=0x%04x en=0x%04x pio=%d\n", mvsd_irq() 511 intr_status, host->intr_en, host->pio_size); mvsd_irq() 517 struct mvsd_host *host = (struct mvsd_host *)data; mvsd_timeout_timer() local 518 void __iomem *iobase = host->base; mvsd_timeout_timer() 522 spin_lock_irqsave(&host->lock, flags); mvsd_timeout_timer() 523 mrq = host->mrq; mvsd_timeout_timer() 525 dev_err(host->dev, "Timeout waiting for hardware interrupt.\n"); mvsd_timeout_timer() 526 dev_err(host->dev, "hw_state=0x%04x, intr_status=0x%04x intr_en=0x%04x\n", mvsd_timeout_timer() 531 host->mrq = NULL; mvsd_timeout_timer() 535 host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN; mvsd_timeout_timer() 536 mvsd_write(MVSD_XFER_MODE, host->xfer_mode); mvsd_timeout_timer() 538 host->intr_en &= MVSD_NOR_CARD_INT; mvsd_timeout_timer() 539 mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); mvsd_timeout_timer() 544 mvsd_finish_cmd(host, mrq->cmd, 0); mvsd_timeout_timer() 547 mvsd_finish_data(host, mrq->data, 0); mvsd_timeout_timer() 550 spin_unlock_irqrestore(&host->lock, flags); mvsd_timeout_timer() 553 mmc_request_done(host->mmc, mrq); mvsd_timeout_timer() 558 struct mvsd_host *host = mmc_priv(mmc); mvsd_enable_sdio_irq() local 559 void __iomem *iobase = host->base; mvsd_enable_sdio_irq() 562 spin_lock_irqsave(&host->lock, flags); mvsd_enable_sdio_irq() 564 host->xfer_mode |= MVSD_XFER_MODE_INT_CHK_EN; mvsd_enable_sdio_irq() 565 host->intr_en |= MVSD_NOR_CARD_INT; mvsd_enable_sdio_irq() 567 host->xfer_mode &= ~MVSD_XFER_MODE_INT_CHK_EN; mvsd_enable_sdio_irq() 568 host->intr_en &= ~MVSD_NOR_CARD_INT; mvsd_enable_sdio_irq() 570 mvsd_write(MVSD_XFER_MODE, host->xfer_mode); mvsd_enable_sdio_irq() 571 mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); mvsd_enable_sdio_irq() 572 spin_unlock_irqrestore(&host->lock, flags); mvsd_enable_sdio_irq() 575 static void mvsd_power_up(struct mvsd_host *host) mvsd_power_up() argument 577 void __iomem *iobase = host->base; mvsd_power_up() 578 dev_dbg(host->dev, "power up\n"); mvsd_power_up() 589 static void mvsd_power_down(struct mvsd_host *host) mvsd_power_down() argument 591 void __iomem *iobase = host->base; mvsd_power_down() 592 dev_dbg(host->dev, "power down\n"); 
mvsd_power_down() 605 struct mvsd_host *host = mmc_priv(mmc); mvsd_set_ios() local 606 void __iomem *iobase = host->base; mvsd_set_ios() 610 mvsd_power_up(host); mvsd_set_ios() 615 host->clock = 0; mvsd_set_ios() 616 dev_dbg(host->dev, "clock off\n"); mvsd_set_ios() 617 } else if (ios->clock != host->clock) { mvsd_set_ios() 618 u32 m = DIV_ROUND_UP(host->base_clock, ios->clock) - 1; mvsd_set_ios() 622 host->clock = ios->clock; mvsd_set_ios() 623 host->ns_per_clk = 1000000000 / (host->base_clock / (m+1)); mvsd_set_ios() 624 dev_dbg(host->dev, "clock=%d (%d), div=0x%04x\n", mvsd_set_ios() 625 ios->clock, host->base_clock / (m+1), m); mvsd_set_ios() 655 host->ctrl = ctrl_reg; mvsd_set_ios() 657 dev_dbg(host->dev, "ctrl 0x%04x: %s %s %s\n", ctrl_reg, mvsd_set_ios() 666 mvsd_power_down(host); mvsd_set_ios() 677 mv_conf_mbus_windows(struct mvsd_host *host, mv_conf_mbus_windows() argument 680 void __iomem *iobase = host->base; mv_conf_mbus_windows() 702 struct mvsd_host *host = NULL; mvsd_probe() local 718 host = mmc_priv(mmc); mvsd_probe() 719 host->mmc = mmc; mvsd_probe() 720 host->dev = &pdev->dev; mvsd_probe() 729 host->clk = devm_clk_get(&pdev->dev, NULL); mvsd_probe() 730 if (!IS_ERR(host->clk)) mvsd_probe() 731 clk_prepare_enable(host->clk); mvsd_probe() 737 mmc->f_min = DIV_ROUND_UP(host->base_clock, MVSD_BASE_DIV_MAX); mvsd_probe() 748 if (IS_ERR(host->clk)) { mvsd_probe() 754 host->base_clock = clk_get_rate(host->clk) / 2; mvsd_probe() 768 host->base_clock = mvsd_data->clock / 2; mvsd_probe() 789 spin_lock_init(&host->lock); mvsd_probe() 791 host->base = devm_ioremap_resource(&pdev->dev, r); mvsd_probe() 792 if (IS_ERR(host->base)) { mvsd_probe() 793 ret = PTR_ERR(host->base); mvsd_probe() 800 mv_conf_mbus_windows(host, dram); mvsd_probe() 802 mvsd_power_down(host); mvsd_probe() 804 ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host); mvsd_probe() 810 setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host); mvsd_probe() 825 if (!IS_ERR(host->clk)) mvsd_probe() 826 clk_disable_unprepare(host->clk); mvsd_probe() 837 struct mvsd_host *host = mmc_priv(mmc); mvsd_remove() local 840 del_timer_sync(&host->timer); mvsd_remove() 841 mvsd_power_down(host); mvsd_remove() 843 if (!IS_ERR(host->clk)) mvsd_remove() 844 clk_disable_unprepare(host->clk); mvsd_remove()
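mvsd_set_ios() above derives the divider as m = DIV_ROUND_UP(base_clock, clock) - 1, programs base_clock / (m + 1), and records ns_per_clk from the rate actually achieved; mvsd_setup_data() later turns data->timeout_ns into clock ticks with that figure. The short program below reruns the same arithmetic with invented frequencies; any clamping of m that the driver performs in the lines elided from the listing is deliberately left out.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int base_clock = 50000000;	/* hypothetical host clock in Hz */
	unsigned int requests[] = { 400000, 25000000 };

	for (unsigned int i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
		/* Smallest divider that does not overshoot the requested rate. */
		unsigned int m = DIV_ROUND_UP(base_clock, requests[i]) - 1;
		unsigned int actual = base_clock / (m + 1);
		unsigned int ns_per_clk = 1000000000 / actual;
		/* Example: convert a 100 ms data timeout into clock ticks. */
		unsigned int timeout_clks = DIV_ROUND_UP(100000000, ns_per_clk);

		printf("req %8u Hz -> m=%3u, actual %8u Hz, %4u ns/clk, 100 ms = %u clks\n",
		       requests[i], m, actual, ns_per_clk, timeout_clks);
	}
	return 0;
}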
H A D | atmel-mci.c | 35 #include <linux/mmc/host.h> 226 u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data); 227 void (*submit_data)(struct atmel_mci *host, struct mmc_data *data); 228 void (*stop_transfer)(struct atmel_mci *host); 234 * @host: The MMC controller this slot is using. 241 * @clock: Clock rate configured by set_ios(). Protected by host->lock. 252 struct atmel_mci *host; member in struct:atmel_mci_slot 273 #define atmci_test_and_clear_pending(host, event) \ 274 test_and_clear_bit(event, &host->pending_events) 275 #define atmci_set_completed(host, event) \ 276 set_bit(event, &host->completed_events) 277 #define atmci_set_pending(host, event) \ 278 set_bit(event, &host->pending_events) 293 spin_lock_bh(&slot->host->lock); atmci_req_show() 319 spin_unlock_bh(&slot->host->lock); atmci_req_show() 385 struct atmel_mci *host = s->private; atmci_regs_show() local 394 pm_runtime_get_sync(&host->pdev->dev); atmci_regs_show() 401 spin_lock_bh(&host->lock); atmci_regs_show() 402 memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE); atmci_regs_show() 403 spin_unlock_bh(&host->lock); atmci_regs_show() 405 pm_runtime_mark_last_busy(&host->pdev->dev); atmci_regs_show() 406 pm_runtime_put_autosuspend(&host->pdev->dev); atmci_regs_show() 412 if (host->caps.has_odd_clk_div) atmci_regs_show() 426 if (host->caps.has_cstor_reg) atmci_regs_show() 434 if (host->caps.has_dma_conf_reg) { atmci_regs_show() 444 if (host->caps.has_cfg_reg) { atmci_regs_show() 477 struct atmel_mci *host = slot->host; atmci_init_debugfs() local 485 node = debugfs_create_file("regs", S_IRUSR, root, host, atmci_init_debugfs() 496 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state); atmci_init_debugfs() 501 (u32 *)&host->pending_events); atmci_init_debugfs() 506 (u32 *)&host->completed_events); atmci_init_debugfs() 583 static inline unsigned int atmci_get_version(struct atmel_mci *host) atmci_get_version() argument 585 return atmci_readl(host, ATMCI_VERSION) & 0x00000fff; atmci_get_version() 590 struct atmel_mci *host; atmci_timeout_timer() local 592 host = (struct atmel_mci *)data; atmci_timeout_timer() 594 dev_dbg(&host->pdev->dev, "software timeout\n"); atmci_timeout_timer() 596 if (host->mrq->cmd->data) { atmci_timeout_timer() 597 host->mrq->cmd->data->error = -ETIMEDOUT; atmci_timeout_timer() 598 host->data = NULL; atmci_timeout_timer() 604 if (host->state == STATE_DATA_XFER) atmci_timeout_timer() 605 host->stop_transfer(host); atmci_timeout_timer() 607 host->mrq->cmd->error = -ETIMEDOUT; atmci_timeout_timer() 608 host->cmd = NULL; atmci_timeout_timer() 610 host->need_reset = 1; atmci_timeout_timer() 611 host->state = STATE_END_REQUEST; atmci_timeout_timer() 613 tasklet_schedule(&host->tasklet); atmci_timeout_timer() 616 static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host, atmci_ns_to_clocks() argument 625 /* Maximum clock frequency is host->bus_hz/2 */ atmci_ns_to_clocks() 626 return us * (DIV_ROUND_UP(host->bus_hz, 2000000)); atmci_ns_to_clocks() 629 static void atmci_set_timeout(struct atmel_mci *host, atmci_set_timeout() argument 639 timeout = atmci_ns_to_clocks(host, data->timeout_ns) atmci_set_timeout() 656 atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc))); atmci_set_timeout() 711 static void atmci_send_command(struct atmel_mci *host, atmci_send_command() argument 714 WARN_ON(host->cmd); atmci_send_command() 715 host->cmd = cmd; atmci_send_command() 717 dev_vdbg(&host->pdev->dev, atmci_send_command() 721 atmci_writel(host, ATMCI_ARGR, 
cmd->arg); atmci_send_command() 722 atmci_writel(host, ATMCI_CMDR, cmd_flags); atmci_send_command() 725 static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data) atmci_send_stop_cmd() argument 727 dev_dbg(&host->pdev->dev, "send stop command\n"); atmci_send_stop_cmd() 728 atmci_send_command(host, data->stop, host->stop_cmdr); atmci_send_stop_cmd() 729 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY); atmci_send_stop_cmd() 734 * Update host->data_size and host->sg. 736 static void atmci_pdc_set_single_buf(struct atmel_mci *host, atmci_pdc_set_single_buf() argument 755 if (!host->caps.has_rwproof) { atmci_pdc_set_single_buf() 756 buf_size = host->buf_size; atmci_pdc_set_single_buf() 757 atmci_writel(host, pointer_reg, host->buf_phys_addr); atmci_pdc_set_single_buf() 759 buf_size = sg_dma_len(host->sg); atmci_pdc_set_single_buf() 760 atmci_writel(host, pointer_reg, sg_dma_address(host->sg)); atmci_pdc_set_single_buf() 763 if (host->data_size <= buf_size) { atmci_pdc_set_single_buf() 764 if (host->data_size & 0x3) { atmci_pdc_set_single_buf() 766 atmci_writel(host, counter_reg, host->data_size); atmci_pdc_set_single_buf() 767 atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE); atmci_pdc_set_single_buf() 770 atmci_writel(host, counter_reg, host->data_size / 4); atmci_pdc_set_single_buf() 772 host->data_size = 0; atmci_pdc_set_single_buf() 775 atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4); atmci_pdc_set_single_buf() 776 host->data_size -= sg_dma_len(host->sg); atmci_pdc_set_single_buf() 777 if (host->data_size) atmci_pdc_set_single_buf() 778 host->sg = sg_next(host->sg); atmci_pdc_set_single_buf() 787 static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir) atmci_pdc_set_both_buf() argument 789 atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF); atmci_pdc_set_both_buf() 790 if (host->data_size) atmci_pdc_set_both_buf() 791 atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF); atmci_pdc_set_both_buf() 797 static void atmci_pdc_cleanup(struct atmel_mci *host) atmci_pdc_cleanup() argument 799 struct mmc_data *data = host->data; atmci_pdc_cleanup() 802 dma_unmap_sg(&host->pdev->dev, atmci_pdc_cleanup() 813 static void atmci_pdc_complete(struct atmel_mci *host) atmci_pdc_complete() argument 815 int transfer_size = host->data->blocks * host->data->blksz; atmci_pdc_complete() 818 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); atmci_pdc_complete() 820 if ((!host->caps.has_rwproof) atmci_pdc_complete() 821 && (host->data->flags & MMC_DATA_READ)) { atmci_pdc_complete() 822 if (host->caps.has_bad_data_ordering) atmci_pdc_complete() 824 host->buffer[i] = swab32(host->buffer[i]); atmci_pdc_complete() 825 sg_copy_from_buffer(host->data->sg, host->data->sg_len, atmci_pdc_complete() 826 host->buffer, transfer_size); atmci_pdc_complete() 829 atmci_pdc_cleanup(host); atmci_pdc_complete() 831 dev_dbg(&host->pdev->dev, "(%s) set pending xfer complete\n", __func__); atmci_pdc_complete() 832 atmci_set_pending(host, EVENT_XFER_COMPLETE); atmci_pdc_complete() 833 tasklet_schedule(&host->tasklet); atmci_pdc_complete() 836 static void atmci_dma_cleanup(struct atmel_mci *host) atmci_dma_cleanup() argument 838 struct mmc_data *data = host->data; atmci_dma_cleanup() 841 dma_unmap_sg(host->dma.chan->device->dev, atmci_dma_cleanup() 852 struct atmel_mci *host = arg; atmci_dma_complete() local 853 struct mmc_data *data = host->data; atmci_dma_complete() 855 dev_vdbg(&host->pdev->dev, "DMA complete\n"); atmci_dma_complete() 857 if 
(host->caps.has_dma_conf_reg) atmci_dma_complete() 859 atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN); atmci_dma_complete() 861 atmci_dma_cleanup(host); atmci_dma_complete() 868 dev_dbg(&host->pdev->dev, atmci_dma_complete() 870 atmci_set_pending(host, EVENT_XFER_COMPLETE); atmci_dma_complete() 871 tasklet_schedule(&host->tasklet); atmci_dma_complete() 893 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); atmci_dma_complete() 901 static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data) atmci_prepare_data() argument 907 host->sg = data->sg; atmci_prepare_data() 908 host->sg_len = data->sg_len; atmci_prepare_data() 909 host->data = data; atmci_prepare_data() 910 host->data_chan = NULL; atmci_prepare_data() 923 host->need_reset = true; atmci_prepare_data() 925 host->pio_offset = 0; atmci_prepare_data() 941 atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data) atmci_prepare_data_pdc() argument 950 host->data = data; atmci_prepare_data_pdc() 951 host->sg = data->sg; atmci_prepare_data_pdc() 955 atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE); atmci_prepare_data_pdc() 966 tmp = atmci_readl(host, ATMCI_MR); atmci_prepare_data_pdc() 969 atmci_writel(host, ATMCI_MR, tmp); atmci_prepare_data_pdc() 972 host->data_size = data->blocks * data->blksz; atmci_prepare_data_pdc() 973 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir); atmci_prepare_data_pdc() 975 if ((!host->caps.has_rwproof) atmci_prepare_data_pdc() 976 && (host->data->flags & MMC_DATA_WRITE)) { atmci_prepare_data_pdc() 977 sg_copy_to_buffer(host->data->sg, host->data->sg_len, atmci_prepare_data_pdc() 978 host->buffer, host->data_size); atmci_prepare_data_pdc() 979 if (host->caps.has_bad_data_ordering) atmci_prepare_data_pdc() 980 for (i = 0; i < host->data_size; i++) atmci_prepare_data_pdc() 981 host->buffer[i] = swab32(host->buffer[i]); atmci_prepare_data_pdc() 984 if (host->data_size) atmci_prepare_data_pdc() 985 atmci_pdc_set_both_buf(host, atmci_prepare_data_pdc() 992 atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) atmci_prepare_data_dma() argument 1006 WARN_ON(host->data); atmci_prepare_data_dma() 1007 host->sg = NULL; atmci_prepare_data_dma() 1008 host->data = data; atmci_prepare_data_dma() 1018 return atmci_prepare_data(host, data); atmci_prepare_data_dma() 1020 return atmci_prepare_data(host, data); atmci_prepare_data_dma() 1024 return atmci_prepare_data(host, data); atmci_prepare_data_dma() 1028 chan = host->dma.chan; atmci_prepare_data_dma() 1030 host->data_chan = chan; atmci_prepare_data_dma() 1037 host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM; atmci_prepare_data_dma() 1038 maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst); atmci_prepare_data_dma() 1041 host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV; atmci_prepare_data_dma() 1042 maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst); atmci_prepare_data_dma() 1045 if (host->caps.has_dma_conf_reg) atmci_prepare_data_dma() 1046 atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | atmci_prepare_data_dma() 1052 dmaengine_slave_config(chan, &host->dma_conf); atmci_prepare_data_dma() 1059 host->dma.data_desc = desc; atmci_prepare_data_dma() 1061 desc->callback_param = host; atmci_prepare_data_dma() 1070 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data) atmci_submit_data() argument 1079 atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data) atmci_submit_data_pdc() argument 1082 atmci_writel(host, 
ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); atmci_submit_data_pdc() 1084 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); atmci_submit_data_pdc() 1088 atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) atmci_submit_data_dma() argument 1090 struct dma_chan *chan = host->data_chan; atmci_submit_data_dma() 1091 struct dma_async_tx_descriptor *desc = host->dma.data_desc; atmci_submit_data_dma() 1099 static void atmci_stop_transfer(struct atmel_mci *host) atmci_stop_transfer() argument 1101 dev_dbg(&host->pdev->dev, atmci_stop_transfer() 1103 atmci_set_pending(host, EVENT_XFER_COMPLETE); atmci_stop_transfer() 1104 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); atmci_stop_transfer() 1110 static void atmci_stop_transfer_pdc(struct atmel_mci *host) atmci_stop_transfer_pdc() argument 1112 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); atmci_stop_transfer_pdc() 1115 static void atmci_stop_transfer_dma(struct atmel_mci *host) atmci_stop_transfer_dma() argument 1117 struct dma_chan *chan = host->data_chan; atmci_stop_transfer_dma() 1121 atmci_dma_cleanup(host); atmci_stop_transfer_dma() 1124 dev_dbg(&host->pdev->dev, atmci_stop_transfer_dma() 1126 atmci_set_pending(host, EVENT_XFER_COMPLETE); atmci_stop_transfer_dma() 1127 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); atmci_stop_transfer_dma() 1135 static void atmci_start_request(struct atmel_mci *host, atmci_start_request() argument 1145 host->cur_slot = slot; atmci_start_request() 1146 host->mrq = mrq; atmci_start_request() 1148 host->pending_events = 0; atmci_start_request() 1149 host->completed_events = 0; atmci_start_request() 1150 host->cmd_status = 0; atmci_start_request() 1151 host->data_status = 0; atmci_start_request() 1153 dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode); atmci_start_request() 1155 if (host->need_reset || host->caps.need_reset_after_xfer) { atmci_start_request() 1156 iflags = atmci_readl(host, ATMCI_IMR); atmci_start_request() 1158 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); atmci_start_request() 1159 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); atmci_start_request() 1160 atmci_writel(host, ATMCI_MR, host->mode_reg); atmci_start_request() 1161 if (host->caps.has_cfg_reg) atmci_start_request() 1162 atmci_writel(host, ATMCI_CFG, host->cfg_reg); atmci_start_request() 1163 atmci_writel(host, ATMCI_IER, iflags); atmci_start_request() 1164 host->need_reset = false; atmci_start_request() 1166 atmci_writel(host, ATMCI_SDCR, slot->sdc_reg); atmci_start_request() 1168 iflags = atmci_readl(host, ATMCI_IMR); atmci_start_request() 1175 atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT); atmci_start_request() 1176 while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY)) atmci_start_request() 1182 atmci_set_timeout(host, slot, data); atmci_start_request() 1185 atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks) atmci_start_request() 1190 iflags |= host->prepare_data(host, data); atmci_start_request() 1203 if (host->submit_data != &atmci_submit_data_dma) atmci_start_request() 1204 atmci_send_command(host, cmd, cmdflags); atmci_start_request() 1207 host->submit_data(host, data); atmci_start_request() 1209 if (host->submit_data == &atmci_submit_data_dma) atmci_start_request() 1210 atmci_send_command(host, cmd, cmdflags); atmci_start_request() 1213 host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop); atmci_start_request() 1214 host->stop_cmdr |= ATMCI_CMDR_STOP_XFER; atmci_start_request() 1216 host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ; atmci_start_request() 1218 
host->stop_cmdr |= ATMCI_CMDR_STREAM; atmci_start_request() 1220 host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK; atmci_start_request() 1229 atmci_writel(host, ATMCI_IER, iflags); atmci_start_request() 1231 mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000)); atmci_start_request() 1234 static void atmci_queue_request(struct atmel_mci *host, atmci_queue_request() argument 1238 host->state); atmci_queue_request() 1240 spin_lock_bh(&host->lock); atmci_queue_request() 1242 if (host->state == STATE_IDLE) { atmci_queue_request() 1243 host->state = STATE_SENDING_CMD; atmci_queue_request() 1244 atmci_start_request(host, slot); atmci_queue_request() 1246 dev_dbg(&host->pdev->dev, "queue request\n"); atmci_queue_request() 1247 list_add_tail(&slot->queue_node, &host->queue); atmci_queue_request() 1249 spin_unlock_bh(&host->lock); atmci_queue_request() 1255 struct atmel_mci *host = slot->host; atmci_request() local 1259 dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode); atmci_request() 1261 pm_runtime_get_sync(&host->pdev->dev); atmci_request() 1284 atmci_queue_request(host, slot, mrq); atmci_request() 1290 struct atmel_mci *host = slot->host; atmci_set_ios() local 1293 pm_runtime_get_sync(&host->pdev->dev); atmci_set_ios() 1309 spin_lock_bh(&host->lock); atmci_set_ios() 1310 if (!host->mode_reg) { atmci_set_ios() 1311 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); atmci_set_ios() 1312 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); atmci_set_ios() 1313 if (host->caps.has_cfg_reg) atmci_set_ios() 1314 atmci_writel(host, ATMCI_CFG, host->cfg_reg); atmci_set_ios() 1323 if (host->slot[i] && host->slot[i]->clock atmci_set_ios() 1324 && host->slot[i]->clock < clock_min) atmci_set_ios() 1325 clock_min = host->slot[i]->clock; atmci_set_ios() 1329 if (host->caps.has_odd_clk_div) { atmci_set_ios() 1330 clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2; atmci_set_ios() 1334 clock_min, host->bus_hz / 2); atmci_set_ios() 1339 clock_min, host->bus_hz / (511 + 2)); atmci_set_ios() 1342 host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1) atmci_set_ios() 1345 clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1; atmci_set_ios() 1349 clock_min, host->bus_hz / (2 * 256)); atmci_set_ios() 1352 host->mode_reg = ATMCI_MR_CLKDIV(clkdiv); atmci_set_ios() 1360 if (host->caps.has_rwproof) atmci_set_ios() 1361 host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF); atmci_set_ios() 1363 if (host->caps.has_cfg_reg) { atmci_set_ios() 1366 host->cfg_reg |= ATMCI_CFG_HSMODE; atmci_set_ios() 1368 host->cfg_reg &= ~ATMCI_CFG_HSMODE; atmci_set_ios() 1371 if (list_empty(&host->queue)) { atmci_set_ios() 1372 atmci_writel(host, ATMCI_MR, host->mode_reg); atmci_set_ios() 1373 if (host->caps.has_cfg_reg) atmci_set_ios() 1374 atmci_writel(host, ATMCI_CFG, host->cfg_reg); atmci_set_ios() 1376 host->need_clock_update = true; atmci_set_ios() 1379 spin_unlock_bh(&host->lock); atmci_set_ios() 1383 spin_lock_bh(&host->lock); atmci_set_ios() 1386 if (host->slot[i] && host->slot[i]->clock) { atmci_set_ios() 1392 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS); atmci_set_ios() 1393 if (host->mode_reg) { atmci_set_ios() 1394 atmci_readl(host, ATMCI_MR); atmci_set_ios() 1396 host->mode_reg = 0; atmci_set_ios() 1398 spin_unlock_bh(&host->lock); atmci_set_ios() 1427 pm_runtime_mark_last_busy(&host->pdev->dev); atmci_set_ios() 1428 pm_runtime_put_autosuspend(&host->pdev->dev); atmci_set_ios() 1463 struct atmel_mci *host = slot->host; atmci_enable_sdio_irq() local 1466 atmci_writel(host, ATMCI_IER, slot->sdio_irq); atmci_enable_sdio_irq() 1468 
atmci_writel(host, ATMCI_IDR, slot->sdio_irq); atmci_enable_sdio_irq() 1479 /* Called with host->lock held */ 1480 static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq) 1481 __releases(&host->lock) 1482 __acquires(&host->lock) 1485 struct mmc_host *prev_mmc = host->cur_slot->mmc; 1487 WARN_ON(host->cmd || host->data); 1494 if (host->need_clock_update) { 1495 atmci_writel(host, ATMCI_MR, host->mode_reg); 1496 if (host->caps.has_cfg_reg) 1497 atmci_writel(host, ATMCI_CFG, host->cfg_reg); 1500 host->cur_slot->mrq = NULL; 1501 host->mrq = NULL; 1502 if (!list_empty(&host->queue)) { 1503 slot = list_entry(host->queue.next, 1506 dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n", 1508 host->state = STATE_SENDING_CMD; 1509 atmci_start_request(host, slot); 1511 dev_vdbg(&host->pdev->dev, "list empty\n"); 1512 host->state = STATE_IDLE; 1515 del_timer(&host->timer); 1517 spin_unlock(&host->lock); 1519 spin_lock(&host->lock); 1521 pm_runtime_mark_last_busy(&host->pdev->dev); 1522 pm_runtime_put_autosuspend(&host->pdev->dev); 1525 static void atmci_command_complete(struct atmel_mci *host, atmci_command_complete() argument 1528 u32 status = host->cmd_status; atmci_command_complete() 1531 cmd->resp[0] = atmci_readl(host, ATMCI_RSPR); atmci_command_complete() 1532 cmd->resp[1] = atmci_readl(host, ATMCI_RSPR); atmci_command_complete() 1533 cmd->resp[2] = atmci_readl(host, ATMCI_RSPR); atmci_command_complete() 1534 cmd->resp[3] = atmci_readl(host, ATMCI_RSPR); atmci_command_complete() 1542 else if (host->mrq->data && (host->mrq->data->blksz & 3)) { atmci_command_complete() 1543 if (host->caps.need_blksz_mul_4) { atmci_command_complete() 1545 host->need_reset = 1; atmci_command_complete() 1576 struct atmel_mci *host = slot->host; atmci_detect_change() local 1582 spin_lock(&host->lock); atmci_detect_change() 1592 if (mrq == host->mrq) { atmci_detect_change() 1597 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); atmci_detect_change() 1598 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); atmci_detect_change() 1599 atmci_writel(host, ATMCI_MR, host->mode_reg); atmci_detect_change() 1600 if (host->caps.has_cfg_reg) atmci_detect_change() 1601 atmci_writel(host, ATMCI_CFG, host->cfg_reg); atmci_detect_change() 1603 host->data = NULL; atmci_detect_change() 1604 host->cmd = NULL; atmci_detect_change() 1606 switch (host->state) { atmci_detect_change() 1612 host->stop_transfer(host); atmci_detect_change() 1616 host->stop_transfer(host); atmci_detect_change() 1628 atmci_request_end(host, mrq); atmci_detect_change() 1637 spin_unlock(&host->lock); atmci_detect_change() 1639 spin_lock(&host->lock); atmci_detect_change() 1642 spin_unlock(&host->lock); atmci_detect_change() 1650 struct atmel_mci *host = (struct atmel_mci *)priv; atmci_tasklet_func() local 1651 struct mmc_request *mrq = host->mrq; atmci_tasklet_func() 1652 struct mmc_data *data = host->data; atmci_tasklet_func() 1653 enum atmel_mci_state state = host->state; atmci_tasklet_func() 1657 spin_lock(&host->lock); atmci_tasklet_func() 1659 state = host->state; atmci_tasklet_func() 1661 dev_vdbg(&host->pdev->dev, atmci_tasklet_func() 1663 state, host->pending_events, host->completed_events, atmci_tasklet_func() 1664 atmci_readl(host, ATMCI_IMR)); atmci_tasklet_func() 1668 dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state); atmci_tasklet_func() 1681 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n"); atmci_tasklet_func() 1682 if (!atmci_test_and_clear_pending(host, atmci_tasklet_func() 1686 dev_dbg(&host->pdev->dev, "set completed cmd 
ready\n"); atmci_tasklet_func() 1687 host->cmd = NULL; atmci_tasklet_func() 1688 atmci_set_completed(host, EVENT_CMD_RDY); atmci_tasklet_func() 1689 atmci_command_complete(host, mrq->cmd); atmci_tasklet_func() 1691 dev_dbg(&host->pdev->dev, atmci_tasklet_func() 1698 host->stop_transfer(host); atmci_tasklet_func() 1699 host->data = NULL; atmci_tasklet_func() 1700 atmci_writel(host, ATMCI_IDR, atmci_tasklet_func() 1707 dev_dbg(&host->pdev->dev, atmci_tasklet_func() 1709 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); atmci_tasklet_func() 1717 if (atmci_test_and_clear_pending(host, atmci_tasklet_func() 1719 dev_dbg(&host->pdev->dev, "set completed data error\n"); atmci_tasklet_func() 1720 atmci_set_completed(host, EVENT_DATA_ERROR); atmci_tasklet_func() 1732 dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n"); atmci_tasklet_func() 1733 if (!atmci_test_and_clear_pending(host, atmci_tasklet_func() 1737 dev_dbg(&host->pdev->dev, atmci_tasklet_func() 1740 atmci_set_completed(host, EVENT_XFER_COMPLETE); atmci_tasklet_func() 1742 if (host->caps.need_notbusy_for_read_ops || atmci_tasklet_func() 1743 (host->data->flags & MMC_DATA_WRITE)) { atmci_tasklet_func() 1744 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); atmci_tasklet_func() 1746 } else if (host->mrq->stop) { atmci_tasklet_func() 1747 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY); atmci_tasklet_func() 1748 atmci_send_stop_cmd(host, data); atmci_tasklet_func() 1751 host->data = NULL; atmci_tasklet_func() 1765 dev_dbg(&host->pdev->dev, "FSM: not busy?\n"); atmci_tasklet_func() 1766 if (!atmci_test_and_clear_pending(host, atmci_tasklet_func() 1770 dev_dbg(&host->pdev->dev, "set completed not busy\n"); atmci_tasklet_func() 1771 atmci_set_completed(host, EVENT_NOTBUSY); atmci_tasklet_func() 1773 if (host->data) { atmci_tasklet_func() 1779 if (host->mrq->stop) { atmci_tasklet_func() 1780 atmci_writel(host, ATMCI_IER, atmci_tasklet_func() 1782 atmci_send_stop_cmd(host, data); atmci_tasklet_func() 1785 host->data = NULL; atmci_tasklet_func() 1797 * In this state, it is important to set host->data to atmci_tasklet_func() 1802 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n"); atmci_tasklet_func() 1803 if (!atmci_test_and_clear_pending(host, atmci_tasklet_func() 1807 dev_dbg(&host->pdev->dev, "FSM: cmd ready\n"); atmci_tasklet_func() 1808 host->cmd = NULL; atmci_tasklet_func() 1811 atmci_command_complete(host, mrq->stop); atmci_tasklet_func() 1813 host->stop_transfer(host); atmci_tasklet_func() 1814 atmci_writel(host, ATMCI_IDR, atmci_tasklet_func() 1819 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); atmci_tasklet_func() 1822 host->data = NULL; atmci_tasklet_func() 1826 atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY atmci_tasklet_func() 1828 status = host->data_status; atmci_tasklet_func() 1830 host->stop_transfer(host); atmci_tasklet_func() 1831 host->data = NULL; atmci_tasklet_func() 1843 atmci_request_end(host, host->mrq); atmci_tasklet_func() 1849 host->state = state; atmci_tasklet_func() 1851 spin_unlock(&host->lock); atmci_tasklet_func() 1854 static void atmci_read_data_pio(struct atmel_mci *host) atmci_read_data_pio() argument 1856 struct scatterlist *sg = host->sg; atmci_read_data_pio() 1858 unsigned int offset = host->pio_offset; atmci_read_data_pio() 1859 struct mmc_data *data = host->data; atmci_read_data_pio() 1865 value = atmci_readl(host, ATMCI_RDR); atmci_read_data_pio() 1874 host->sg = sg = sg_next(sg); atmci_read_data_pio() 1875 host->sg_len--; atmci_read_data_pio() 1876 if (!sg || !host->sg_len) atmci_read_data_pio() 1888 host->sg = sg 
= sg_next(sg); atmci_read_data_pio() 1889 host->sg_len--; atmci_read_data_pio() 1890 if (!sg || !host->sg_len) atmci_read_data_pio() 1899 status = atmci_readl(host, ATMCI_SR); atmci_read_data_pio() 1901 atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY atmci_read_data_pio() 1903 host->data_status = status; atmci_read_data_pio() 1909 host->pio_offset = offset; atmci_read_data_pio() 1915 atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY); atmci_read_data_pio() 1916 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); atmci_read_data_pio() 1919 atmci_set_pending(host, EVENT_XFER_COMPLETE); atmci_read_data_pio() 1922 static void atmci_write_data_pio(struct atmel_mci *host) atmci_write_data_pio() argument 1924 struct scatterlist *sg = host->sg; atmci_write_data_pio() 1926 unsigned int offset = host->pio_offset; atmci_write_data_pio() 1927 struct mmc_data *data = host->data; atmci_write_data_pio() 1935 atmci_writel(host, ATMCI_TDR, value); atmci_write_data_pio() 1940 host->sg = sg = sg_next(sg); atmci_write_data_pio() 1941 host->sg_len--; atmci_write_data_pio() 1942 if (!sg || !host->sg_len) atmci_write_data_pio() 1955 host->sg = sg = sg_next(sg); atmci_write_data_pio() 1956 host->sg_len--; atmci_write_data_pio() 1957 if (!sg || !host->sg_len) { atmci_write_data_pio() 1958 atmci_writel(host, ATMCI_TDR, value); atmci_write_data_pio() 1965 atmci_writel(host, ATMCI_TDR, value); atmci_write_data_pio() 1969 status = atmci_readl(host, ATMCI_SR); atmci_write_data_pio() 1971 atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY atmci_write_data_pio() 1973 host->data_status = status; atmci_write_data_pio() 1979 host->pio_offset = offset; atmci_write_data_pio() 1985 atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY); atmci_write_data_pio() 1986 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); atmci_write_data_pio() 1989 atmci_set_pending(host, EVENT_XFER_COMPLETE); atmci_write_data_pio() 1992 static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status) atmci_sdio_interrupt() argument 1997 struct atmel_mci_slot *slot = host->slot[i]; atmci_sdio_interrupt() 2007 struct atmel_mci *host = dev_id; atmci_interrupt() local 2012 status = atmci_readl(host, ATMCI_SR); atmci_interrupt() 2013 mask = atmci_readl(host, ATMCI_IMR); atmci_interrupt() 2019 dev_dbg(&host->pdev->dev, "IRQ: data error\n"); atmci_interrupt() 2020 atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS atmci_interrupt() 2025 host->data_status = status; atmci_interrupt() 2026 dev_dbg(&host->pdev->dev, "set pending data error\n"); atmci_interrupt() 2028 atmci_set_pending(host, EVENT_DATA_ERROR); atmci_interrupt() 2029 tasklet_schedule(&host->tasklet); atmci_interrupt() 2033 dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n"); atmci_interrupt() 2034 atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE); atmci_interrupt() 2035 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); atmci_interrupt() 2041 if (host->data_size) { atmci_interrupt() 2042 atmci_pdc_set_both_buf(host, XFER_TRANSMIT); atmci_interrupt() 2043 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX); atmci_interrupt() 2044 atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE); atmci_interrupt() 2046 atmci_pdc_complete(host); atmci_interrupt() 2049 dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n"); atmci_interrupt() 2050 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); atmci_interrupt() 2052 if (host->data_size) { atmci_interrupt() 2053 atmci_pdc_set_single_buf(host, atmci_interrupt() 2055 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX); atmci_interrupt() 2060 dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n"); atmci_interrupt() 2061 
atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF); atmci_interrupt() 2062 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX); atmci_interrupt() 2068 if (host->data_size) { atmci_interrupt() 2069 atmci_pdc_set_both_buf(host, XFER_RECEIVE); atmci_interrupt() 2070 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX); atmci_interrupt() 2071 atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF); atmci_interrupt() 2073 atmci_pdc_complete(host); atmci_interrupt() 2076 dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n"); atmci_interrupt() 2077 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX); atmci_interrupt() 2079 if (host->data_size) { atmci_interrupt() 2080 atmci_pdc_set_single_buf(host, atmci_interrupt() 2082 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX); atmci_interrupt() 2093 dev_dbg(&host->pdev->dev, "IRQ: blke\n"); atmci_interrupt() 2094 atmci_writel(host, ATMCI_IDR, ATMCI_BLKE); atmci_interrupt() 2096 dev_dbg(&host->pdev->dev, "set pending notbusy\n"); atmci_interrupt() 2097 atmci_set_pending(host, EVENT_NOTBUSY); atmci_interrupt() 2098 tasklet_schedule(&host->tasklet); atmci_interrupt() 2102 dev_dbg(&host->pdev->dev, "IRQ: not_busy\n"); atmci_interrupt() 2103 atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY); atmci_interrupt() 2105 dev_dbg(&host->pdev->dev, "set pending notbusy\n"); atmci_interrupt() 2106 atmci_set_pending(host, EVENT_NOTBUSY); atmci_interrupt() 2107 tasklet_schedule(&host->tasklet); atmci_interrupt() 2111 atmci_read_data_pio(host); atmci_interrupt() 2113 atmci_write_data_pio(host); atmci_interrupt() 2116 dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n"); atmci_interrupt() 2117 atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY); atmci_interrupt() 2118 host->cmd_status = status; atmci_interrupt() 2120 dev_dbg(&host->pdev->dev, "set pending cmd rdy\n"); atmci_interrupt() 2121 atmci_set_pending(host, EVENT_CMD_RDY); atmci_interrupt() 2122 tasklet_schedule(&host->tasklet); atmci_interrupt() 2126 atmci_sdio_interrupt(host, status); atmci_interrupt() 2148 static int atmci_init_slot(struct atmel_mci *host, atmci_init_slot() argument 2155 mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev); atmci_init_slot() 2161 slot->host = host; atmci_init_slot() 2176 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); atmci_init_slot() 2177 mmc->f_max = host->bus_hz / 2; atmci_init_slot() 2181 if (host->caps.has_highspeed) atmci_init_slot() 2188 if ((slot_data->bus_width >= 4) && host->caps.has_rwproof) atmci_init_slot() 2191 if (atmci_get_version(host) < 0x200) { atmci_init_slot() 2207 if (devm_gpio_request(&host->pdev->dev, slot->detect_pin, atmci_init_slot() 2225 if (devm_gpio_request(&host->pdev->dev, slot->wp_pin, atmci_init_slot() 2232 host->slot[id] = slot; atmci_init_slot() 2276 slot->host->slot[id] = NULL; atmci_cleanup_slot() 2280 static int atmci_configure_dma(struct atmel_mci *host) atmci_configure_dma() argument 2282 host->dma.chan = dma_request_slave_channel_reason(&host->pdev->dev, atmci_configure_dma() 2285 if (PTR_ERR(host->dma.chan) == -ENODEV) { atmci_configure_dma() 2286 struct mci_platform_data *pdata = host->pdev->dev.platform_data; atmci_configure_dma() 2295 host->dma.chan = dma_request_channel(mask, pdata->dma_filter, atmci_configure_dma() 2297 if (!host->dma.chan) atmci_configure_dma() 2298 host->dma.chan = ERR_PTR(-ENODEV); atmci_configure_dma() 2301 if (IS_ERR(host->dma.chan)) atmci_configure_dma() 2302 return PTR_ERR(host->dma.chan); atmci_configure_dma() 2304 dev_info(&host->pdev->dev, "using %s for DMA transfers\n", atmci_configure_dma() 2305 dma_chan_name(host->dma.chan)); atmci_configure_dma() 2307 
host->dma_conf.src_addr = host->mapbase + ATMCI_RDR; atmci_configure_dma() 2308 host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; atmci_configure_dma() 2309 host->dma_conf.src_maxburst = 1; atmci_configure_dma() 2310 host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR; atmci_configure_dma() 2311 host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; atmci_configure_dma() 2312 host->dma_conf.dst_maxburst = 1; atmci_configure_dma() 2313 host->dma_conf.device_fc = false; atmci_configure_dma() 2323 static void atmci_get_cap(struct atmel_mci *host) atmci_get_cap() argument 2327 version = atmci_get_version(host); atmci_get_cap() 2328 dev_info(&host->pdev->dev, atmci_get_cap() 2331 host->caps.has_dma_conf_reg = 0; atmci_get_cap() 2332 host->caps.has_pdc = ATMCI_PDC_CONNECTED; atmci_get_cap() 2333 host->caps.has_cfg_reg = 0; atmci_get_cap() 2334 host->caps.has_cstor_reg = 0; atmci_get_cap() 2335 host->caps.has_highspeed = 0; atmci_get_cap() 2336 host->caps.has_rwproof = 0; atmci_get_cap() 2337 host->caps.has_odd_clk_div = 0; atmci_get_cap() 2338 host->caps.has_bad_data_ordering = 1; atmci_get_cap() 2339 host->caps.need_reset_after_xfer = 1; atmci_get_cap() 2340 host->caps.need_blksz_mul_4 = 1; atmci_get_cap() 2341 host->caps.need_notbusy_for_read_ops = 0; atmci_get_cap() 2347 host->caps.has_odd_clk_div = 1; atmci_get_cap() 2350 host->caps.has_dma_conf_reg = 1; atmci_get_cap() 2351 host->caps.has_pdc = 0; atmci_get_cap() 2352 host->caps.has_cfg_reg = 1; atmci_get_cap() 2353 host->caps.has_cstor_reg = 1; atmci_get_cap() 2354 host->caps.has_highspeed = 1; atmci_get_cap() 2356 host->caps.has_rwproof = 1; atmci_get_cap() 2357 host->caps.need_blksz_mul_4 = 0; atmci_get_cap() 2358 host->caps.need_notbusy_for_read_ops = 1; atmci_get_cap() 2360 host->caps.has_bad_data_ordering = 0; atmci_get_cap() 2361 host->caps.need_reset_after_xfer = 0; atmci_get_cap() 2365 host->caps.has_pdc = 0; atmci_get_cap() 2366 dev_warn(&host->pdev->dev, atmci_get_cap() 2375 struct atmel_mci *host; atmci_probe() local 2397 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); atmci_probe() 2398 if (!host) atmci_probe() 2401 host->pdev = pdev; atmci_probe() 2402 spin_lock_init(&host->lock); atmci_probe() 2403 INIT_LIST_HEAD(&host->queue); atmci_probe() 2405 host->mck = devm_clk_get(&pdev->dev, "mci_clk"); atmci_probe() 2406 if (IS_ERR(host->mck)) atmci_probe() 2407 return PTR_ERR(host->mck); atmci_probe() 2409 host->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); atmci_probe() 2410 if (!host->regs) atmci_probe() 2413 ret = clk_prepare_enable(host->mck); atmci_probe() 2417 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); atmci_probe() 2418 host->bus_hz = clk_get_rate(host->mck); atmci_probe() 2420 host->mapbase = regs->start; atmci_probe() 2422 tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host); atmci_probe() 2424 ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host); atmci_probe() 2426 clk_disable_unprepare(host->mck); atmci_probe() 2431 atmci_get_cap(host); atmci_probe() 2432 ret = atmci_configure_dma(host); atmci_probe() 2436 host->prepare_data = &atmci_prepare_data_dma; atmci_probe() 2437 host->submit_data = &atmci_submit_data_dma; atmci_probe() 2438 host->stop_transfer = &atmci_stop_transfer_dma; atmci_probe() 2439 } else if (host->caps.has_pdc) { atmci_probe() 2441 host->prepare_data = &atmci_prepare_data_pdc; atmci_probe() 2442 host->submit_data = &atmci_submit_data_pdc; atmci_probe() 2443 host->stop_transfer = &atmci_stop_transfer_pdc; atmci_probe() 
2446 host->prepare_data = &atmci_prepare_data; atmci_probe() 2447 host->submit_data = &atmci_submit_data; atmci_probe() 2448 host->stop_transfer = &atmci_stop_transfer; atmci_probe() 2451 platform_set_drvdata(pdev, host); atmci_probe() 2453 setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host); atmci_probe() 2465 ret = atmci_init_slot(host, &pdata->slot[0], atmci_probe() 2469 host->buf_size = host->slot[0]->mmc->max_req_size; atmci_probe() 2473 ret = atmci_init_slot(host, &pdata->slot[1], atmci_probe() 2477 if (host->slot[1]->mmc->max_req_size > host->buf_size) atmci_probe() 2478 host->buf_size = atmci_probe() 2479 host->slot[1]->mmc->max_req_size; atmci_probe() 2488 if (!host->caps.has_rwproof) { atmci_probe() 2489 host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size, atmci_probe() 2490 &host->buf_phys_addr, atmci_probe() 2492 if (!host->buffer) { atmci_probe() 2501 host->mapbase, irq, nr_slots); atmci_probe() 2503 pm_runtime_mark_last_busy(&host->pdev->dev); atmci_probe() 2510 if (host->slot[i]) atmci_probe() 2511 atmci_cleanup_slot(host->slot[i], i); atmci_probe() 2514 clk_disable_unprepare(host->mck); atmci_probe() 2519 del_timer_sync(&host->timer); atmci_probe() 2520 if (!IS_ERR(host->dma.chan)) atmci_probe() 2521 dma_release_channel(host->dma.chan); atmci_probe() 2523 free_irq(irq, host); atmci_probe() 2529 struct atmel_mci *host = platform_get_drvdata(pdev); atmci_remove() local 2534 if (host->buffer) atmci_remove() 2535 dma_free_coherent(&pdev->dev, host->buf_size, atmci_remove() 2536 host->buffer, host->buf_phys_addr); atmci_remove() 2539 if (host->slot[i]) atmci_remove() 2540 atmci_cleanup_slot(host->slot[i], i); atmci_remove() 2543 atmci_writel(host, ATMCI_IDR, ~0UL); atmci_remove() 2544 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS); atmci_remove() 2545 atmci_readl(host, ATMCI_SR); atmci_remove() 2547 del_timer_sync(&host->timer); atmci_remove() 2548 if (!IS_ERR(host->dma.chan)) atmci_remove() 2549 dma_release_channel(host->dma.chan); atmci_remove() 2551 free_irq(platform_get_irq(pdev, 0), host); atmci_remove() 2553 clk_disable_unprepare(host->mck); atmci_remove() 2564 struct atmel_mci *host = dev_get_drvdata(dev); atmci_runtime_suspend() local 2566 clk_disable_unprepare(host->mck); atmci_runtime_suspend() 2575 struct atmel_mci *host = dev_get_drvdata(dev); atmci_runtime_resume() local 2579 return clk_prepare_enable(host->mck); atmci_runtime_resume()
|
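The divider selection in atmci_set_ios() above uses one of two formulas depending on host->caps.has_odd_clk_div, and the warning limits quoted in the listing ("bus_hz / (511 + 2)" and "bus_hz / (2 * 256)") give away the underlying relations: odd-divider-capable controllers clock the card at bus_hz / (clkdiv + 2), older ones at bus_hz / (2 * (clkdiv + 1)). A stand-alone sketch of that arithmetic, with an assumed example MCK rate and without the register encoding details:

/*
 * Sketch of the two clkdiv formulas seen in atmci_set_ios() above.
 * The bus_hz value is an assumption for illustration only.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int bus_hz = 132000000;	/* assumed MCK rate */
	unsigned int clock_min = 25000000;	/* slowest requested card clock */
	unsigned int clkdiv;

	/* has_odd_clk_div case, clamped to the 511 + 2 maximum */
	clkdiv = DIV_ROUND_UP(bus_hz, clock_min) - 2;
	if (clkdiv > 511)
		clkdiv = 511;
	printf("odd-div capable: clkdiv=%u -> %u Hz\n",
	       clkdiv, bus_hz / (clkdiv + 2));

	/* legacy case, clamped to the 255 maximum */
	clkdiv = DIV_ROUND_UP(bus_hz, 2 * clock_min) - 1;
	if (clkdiv > 255)
		clkdiv = 255;
	printf("legacy divider:  clkdiv=%u -> %u Hz\n",
	       clkdiv, bus_hz / (2 * (clkdiv + 1)));
	return 0;
}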
H A D | rtsx_pci_sdmmc.c | 28 #include <linux/mmc/host.h> 63 static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host) sdmmc_dev() argument 65 return &(host->pdev->dev); sdmmc_dev() 68 static inline void sd_clear_error(struct realtek_pci_sdmmc *host) sd_clear_error() argument 70 rtsx_pci_write_register(host->pcr, CARD_STOP, sd_clear_error() 75 static void dump_reg_range(struct realtek_pci_sdmmc *host, u16 start, u16 end) dump_reg_range() argument 87 rtsx_pci_read_register(host->pcr, start + i + j, dump_reg_range() 89 dev_dbg(sdmmc_dev(host), "0x%04X(%d): %8ph\n", dump_reg_range() 94 static void sd_print_debug_regs(struct realtek_pci_sdmmc *host) sd_print_debug_regs() argument 96 dump_reg_range(host, 0xFDA0, 0xFDB3); sd_print_debug_regs() 97 dump_reg_range(host, 0xFD52, 0xFD69); sd_print_debug_regs() 100 #define sd_print_debug_regs(host) 103 static inline int sd_get_cd_int(struct realtek_pci_sdmmc *host) sd_get_cd_int() argument 105 return rtsx_pci_readl(host->pcr, RTSX_BIPR) & SD_EXIST; sd_get_cd_int() 160 static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host, sd_pre_dma_transfer() argument 163 struct rtsx_pcr *pcr = host->pcr; sd_pre_dma_transfer() 168 if (!pre && data->host_cookie && data->host_cookie != host->cookie) { sd_pre_dma_transfer() 169 dev_err(sdmmc_dev(host), sd_pre_dma_transfer() 170 "error: data->host_cookie = %d, host->cookie = %d\n", sd_pre_dma_transfer() 171 data->host_cookie, host->cookie); sd_pre_dma_transfer() 175 if (pre || data->host_cookie != host->cookie) { sd_pre_dma_transfer() 178 count = host->cookie_sg_count; sd_pre_dma_transfer() 183 host->cookie_sg_count = count; sd_pre_dma_transfer() 184 if (++host->cookie < 0) sd_pre_dma_transfer() 185 host->cookie = 1; sd_pre_dma_transfer() 186 data->host_cookie = host->cookie; sd_pre_dma_transfer() 188 host->sg_count = count; sd_pre_dma_transfer() 197 struct realtek_pci_sdmmc *host = mmc_priv(mmc); sdmmc_pre_req() local 201 dev_err(sdmmc_dev(host), sdmmc_pre_req() 207 sd_pre_dma_transfer(host, data, true); sdmmc_pre_req() 208 dev_dbg(sdmmc_dev(host), "pre dma sg: %d\n", host->cookie_sg_count); sdmmc_pre_req() 214 struct realtek_pci_sdmmc *host = mmc_priv(mmc); sdmmc_post_req() local 215 struct rtsx_pcr *pcr = host->pcr; sdmmc_post_req() 223 static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host, sd_send_cmd_get_rsp() argument 226 struct rtsx_pcr *pcr = host->pcr; sd_send_cmd_get_rsp() 237 dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", sd_send_cmd_get_rsp() 283 sd_print_debug_regs(host); sd_send_cmd_get_rsp() 284 sd_clear_error(host); sd_send_cmd_get_rsp() 285 dev_dbg(sdmmc_dev(host), sd_send_cmd_get_rsp() 301 dev_dbg(sdmmc_dev(host), "Invalid response bit\n"); sd_send_cmd_get_rsp() 309 dev_dbg(sdmmc_dev(host), "CRC7 error\n"); sd_send_cmd_get_rsp() 324 dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n", sd_send_cmd_get_rsp() 329 dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n", sd_send_cmd_get_rsp() 341 static int sd_read_data(struct realtek_pci_sdmmc *host, struct mmc_command *cmd, sd_read_data() argument 344 struct rtsx_pcr *pcr = host->pcr; sd_read_data() 348 dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", sd_read_data() 376 sd_print_debug_regs(host); sd_read_data() 377 dev_dbg(sdmmc_dev(host), sd_read_data() 385 dev_dbg(sdmmc_dev(host), sd_read_data() 394 static int sd_write_data(struct realtek_pci_sdmmc *host, sd_write_data() argument 398 struct rtsx_pcr *pcr = host->pcr; sd_write_data() 401 dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", 
sd_write_data() 407 sd_send_cmd_get_rsp(host, cmd); sd_write_data() 414 dev_dbg(sdmmc_dev(host), sd_write_data() 432 sd_print_debug_regs(host); sd_write_data() 433 dev_dbg(sdmmc_dev(host), sd_write_data() 441 static int sd_read_long_data(struct realtek_pci_sdmmc *host, sd_read_long_data() argument 444 struct rtsx_pcr *pcr = host->pcr; sd_read_long_data() 445 struct mmc_host *mmc = host->mmc; sd_read_long_data() 455 dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", sd_read_long_data() 489 err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, 1, 10000); sd_read_long_data() 491 sd_print_debug_regs(host); sd_read_long_data() 492 sd_clear_error(host); sd_read_long_data() 499 static int sd_write_long_data(struct realtek_pci_sdmmc *host, sd_write_long_data() argument 502 struct rtsx_pcr *pcr = host->pcr; sd_write_long_data() 503 struct mmc_host *mmc = host->mmc; sd_write_long_data() 512 sd_send_cmd_get_rsp(host, cmd); sd_write_long_data() 516 dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", sd_write_long_data() 547 err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, 0, 10000); sd_write_long_data() 549 sd_clear_error(host); sd_write_long_data() 556 static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) sd_rw_multi() argument 560 if (host->sg_count < 0) { sd_rw_multi() 561 data->error = host->sg_count; sd_rw_multi() 562 dev_dbg(sdmmc_dev(host), "%s: sg_count = %d is invalid\n", sd_rw_multi() 563 __func__, host->sg_count); sd_rw_multi() 568 return sd_read_long_data(host, mrq); sd_rw_multi() 570 return sd_write_long_data(host, mrq); sd_rw_multi() 573 static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host) sd_enable_initial_mode() argument 575 rtsx_pci_write_register(host->pcr, SD_CFG1, sd_enable_initial_mode() 579 static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host) sd_disable_initial_mode() argument 581 rtsx_pci_write_register(host->pcr, SD_CFG1, sd_disable_initial_mode() 585 static void sd_normal_rw(struct realtek_pci_sdmmc *host, sd_normal_rw() argument 599 if (host->initial_mode) sd_normal_rw() 600 sd_disable_initial_mode(host); sd_normal_rw() 602 cmd->error = sd_read_data(host, cmd, (u16)data->blksz, buf, sd_normal_rw() 605 if (host->initial_mode) sd_normal_rw() 606 sd_enable_initial_mode(host); sd_normal_rw() 612 cmd->error = sd_write_data(host, cmd, (u16)data->blksz, buf, sd_normal_rw() 619 static int sd_change_phase(struct realtek_pci_sdmmc *host, sd_change_phase() argument 622 struct rtsx_pcr *pcr = host->pcr; sd_change_phase() 625 dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n", sd_change_phase() 667 static u8 sd_search_final_phase(struct realtek_pci_sdmmc *host, u32 phase_map) sd_search_final_phase() argument 674 dev_err(sdmmc_dev(host), "phase error: [map:%x]\n", phase_map); sd_search_final_phase() 688 dev_dbg(sdmmc_dev(host), "phase: [map:%x] [maxlen:%d] [final:%d]\n", sd_search_final_phase() 694 static void sd_wait_data_idle(struct realtek_pci_sdmmc *host) sd_wait_data_idle() argument 700 err = rtsx_pci_read_register(host->pcr, SD_DATA_STATE, &val); sd_wait_data_idle() 708 static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host, sd_tuning_rx_cmd() argument 714 err = sd_change_phase(host, sample_point, true); sd_tuning_rx_cmd() 719 err = sd_read_data(host, &cmd, 0x40, NULL, 0, 100); sd_tuning_rx_cmd() 722 sd_wait_data_idle(host); sd_tuning_rx_cmd() 723 sd_clear_error(host); sd_tuning_rx_cmd() 730 static int sd_tuning_phase(struct realtek_pci_sdmmc *host, sd_tuning_phase() 
argument 737 err = sd_tuning_rx_cmd(host, opcode, (u8)i); sd_tuning_phase() 748 static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode) sd_tuning_rx() argument 755 err = sd_tuning_phase(host, opcode, &(raw_phase_map[i])); sd_tuning_rx() 765 dev_dbg(sdmmc_dev(host), "RX raw_phase_map[%d] = 0x%08x\n", sd_tuning_rx() 769 dev_dbg(sdmmc_dev(host), "RX phase_map = 0x%08x\n", phase_map); sd_tuning_rx() 772 final_phase = sd_search_final_phase(host, phase_map); sd_tuning_rx() 776 err = sd_change_phase(host, final_phase, true); sd_tuning_rx() 801 struct realtek_pci_sdmmc *host = container_of(work, sd_request() local 803 struct rtsx_pcr *pcr = host->pcr; sd_request() 805 struct mmc_host *mmc = host->mmc; sd_request() 806 struct mmc_request *mrq = host->mrq; sd_request() 813 if (host->eject || !sd_get_cd_int(host)) { sd_request() 818 err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD); sd_request() 828 rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth, sd_request() 829 host->initial_mode, host->double_clk, host->vpclk); sd_request() 834 mutex_lock(&host->host_mutex); sd_request() 835 host->mrq = mrq; sd_request() 836 mutex_unlock(&host->host_mutex); sd_request() 842 sd_send_cmd_get_rsp(host, cmd); sd_request() 844 cmd->error = sd_rw_multi(host, mrq); sd_request() 845 if (!host->using_cookie) sd_request() 846 sdmmc_post_req(host->mmc, host->mrq, 0); sd_request() 849 sd_send_cmd_get_rsp(host, mrq->stop); sd_request() 851 sd_normal_rw(host, mrq); sd_request() 865 dev_dbg(sdmmc_dev(host), "CMD %d 0x%08x error(%d)\n", sd_request() 869 mutex_lock(&host->host_mutex); sd_request() 870 host->mrq = NULL; sd_request() 871 mutex_unlock(&host->host_mutex); sd_request() 878 struct realtek_pci_sdmmc *host = mmc_priv(mmc); sdmmc_request() local 881 mutex_lock(&host->host_mutex); sdmmc_request() 882 host->mrq = mrq; sdmmc_request() 883 mutex_unlock(&host->host_mutex); sdmmc_request() 886 host->using_cookie = sd_pre_dma_transfer(host, data, false); sdmmc_request() 888 queue_work(host->workq, &host->work); sdmmc_request() 891 static int sd_set_bus_width(struct realtek_pci_sdmmc *host, sd_set_bus_width() argument 902 err = rtsx_pci_write_register(host->pcr, SD_CFG1, sd_set_bus_width() 908 static int sd_power_on(struct realtek_pci_sdmmc *host) sd_power_on() argument 910 struct rtsx_pcr *pcr = host->pcr; sd_power_on() 913 if (host->power_state == SDMMC_POWER_ON) sd_power_on() 938 host->power_state = SDMMC_POWER_ON; sd_power_on() 942 static int sd_power_off(struct realtek_pci_sdmmc *host) sd_power_off() argument 944 struct rtsx_pcr *pcr = host->pcr; sd_power_off() 947 host->power_state = SDMMC_POWER_OFF; sd_power_off() 965 static int sd_set_power_mode(struct realtek_pci_sdmmc *host, sd_set_power_mode() argument 971 err = sd_power_off(host); sd_set_power_mode() 973 err = sd_power_on(host); sd_set_power_mode() 978 static int sd_set_timing(struct realtek_pci_sdmmc *host, unsigned char timing) sd_set_timing() argument 980 struct rtsx_pcr *pcr = host->pcr; sd_set_timing() 1052 struct realtek_pci_sdmmc *host = mmc_priv(mmc); sdmmc_set_ios() local 1053 struct rtsx_pcr *pcr = host->pcr; sdmmc_set_ios() 1055 if (host->eject) sdmmc_set_ios() 1058 if (rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD)) sdmmc_set_ios() 1065 sd_set_bus_width(host, ios->bus_width); sdmmc_set_ios() 1066 sd_set_power_mode(host, ios->power_mode); sdmmc_set_ios() 1067 sd_set_timing(host, ios->timing); sdmmc_set_ios() 1069 host->vpclk = false; sdmmc_set_ios() 1070 host->double_clk = true; sdmmc_set_ios() 1075 host->ssc_depth = 
RTSX_SSC_DEPTH_2M; sdmmc_set_ios() 1076 host->vpclk = true; sdmmc_set_ios() 1077 host->double_clk = false; sdmmc_set_ios() 1082 host->ssc_depth = RTSX_SSC_DEPTH_1M; sdmmc_set_ios() 1085 host->ssc_depth = RTSX_SSC_DEPTH_500K; sdmmc_set_ios() 1089 host->initial_mode = (ios->clock <= 1000000) ? true : false; sdmmc_set_ios() 1091 host->clock = ios->clock; sdmmc_set_ios() 1092 rtsx_pci_switch_clock(pcr, ios->clock, host->ssc_depth, sdmmc_set_ios() 1093 host->initial_mode, host->double_clk, host->vpclk); sdmmc_set_ios() 1100 struct realtek_pci_sdmmc *host = mmc_priv(mmc); sdmmc_get_ro() local 1101 struct rtsx_pcr *pcr = host->pcr; sdmmc_get_ro() 1105 if (host->eject) sdmmc_get_ro() 1114 dev_dbg(sdmmc_dev(host), "%s: RTSX_BIPR = 0x%08x\n", __func__, val); sdmmc_get_ro() 1125 struct realtek_pci_sdmmc *host = mmc_priv(mmc); sdmmc_get_cd() local 1126 struct rtsx_pcr *pcr = host->pcr; sdmmc_get_cd() 1130 if (host->eject) sdmmc_get_cd() 1139 dev_dbg(sdmmc_dev(host), "%s: RTSX_BIPR = 0x%08x\n", __func__, val); sdmmc_get_cd() 1148 static int sd_wait_voltage_stable_1(struct realtek_pci_sdmmc *host) sd_wait_voltage_stable_1() argument 1150 struct rtsx_pcr *pcr = host->pcr; sd_wait_voltage_stable_1() 1181 static int sd_wait_voltage_stable_2(struct realtek_pci_sdmmc *host) sd_wait_voltage_stable_2() argument 1183 struct rtsx_pcr *pcr = host->pcr; sd_wait_voltage_stable_2() 1200 /* SD_CMD, SD_DAT[3:0] should be pulled high by host */ sd_wait_voltage_stable_2() 1210 dev_dbg(sdmmc_dev(host), sd_wait_voltage_stable_2() 1223 struct realtek_pci_sdmmc *host = mmc_priv(mmc); sdmmc_switch_voltage() local 1224 struct rtsx_pcr *pcr = host->pcr; sdmmc_switch_voltage() 1228 dev_dbg(sdmmc_dev(host), "%s: signal_voltage = %d\n", sdmmc_switch_voltage() 1231 if (host->eject) sdmmc_switch_voltage() 1234 err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD); sdmmc_switch_voltage() 1248 err = sd_wait_voltage_stable_1(host); sdmmc_switch_voltage() 1258 err = sd_wait_voltage_stable_2(host); sdmmc_switch_voltage() 1275 struct realtek_pci_sdmmc *host = mmc_priv(mmc); sdmmc_execute_tuning() local 1276 struct rtsx_pcr *pcr = host->pcr; sdmmc_execute_tuning() 1279 if (host->eject) sdmmc_execute_tuning() 1282 err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD); sdmmc_execute_tuning() 1293 err = sd_change_phase(host, SDR104_TX_PHASE(pcr), false); sdmmc_execute_tuning() 1297 err = sd_change_phase(host, SDR50_TX_PHASE(pcr), false); sdmmc_execute_tuning() 1301 err = sd_change_phase(host, DDR50_TX_PHASE(pcr), false); sdmmc_execute_tuning() 1314 err = sd_tuning_rx(host, opcode); sdmmc_execute_tuning() 1316 err = sd_change_phase(host, DDR50_RX_PHASE(pcr), true); sdmmc_execute_tuning() 1335 static void init_extra_caps(struct realtek_pci_sdmmc *host) init_extra_caps() argument 1337 struct mmc_host *mmc = host->mmc; init_extra_caps() 1338 struct rtsx_pcr *pcr = host->pcr; init_extra_caps() 1340 dev_dbg(sdmmc_dev(host), "pcr->extra_caps = 0x%x\n", pcr->extra_caps); init_extra_caps() 1354 static void realtek_init_host(struct realtek_pci_sdmmc *host) realtek_init_host() argument 1356 struct mmc_host *mmc = host->mmc; realtek_init_host() 1369 init_extra_caps(host); realtek_init_host() 1380 struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); rtsx_pci_sdmmc_card_event() local 1382 host->cookie = -1; rtsx_pci_sdmmc_card_event() 1383 mmc_detect_change(host->mmc, 0); rtsx_pci_sdmmc_card_event() 1389 struct realtek_pci_sdmmc *host; rtsx_pci_sdmmc_drv_probe() local 1402 mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); 
rtsx_pci_sdmmc_drv_probe() 1406 host = mmc_priv(mmc); rtsx_pci_sdmmc_drv_probe() 1407 host->workq = create_singlethread_workqueue(SDMMC_WORKQ_NAME); rtsx_pci_sdmmc_drv_probe() 1408 if (!host->workq) { rtsx_pci_sdmmc_drv_probe() 1412 host->pcr = pcr; rtsx_pci_sdmmc_drv_probe() 1413 host->mmc = mmc; rtsx_pci_sdmmc_drv_probe() 1414 host->pdev = pdev; rtsx_pci_sdmmc_drv_probe() 1415 host->cookie = -1; rtsx_pci_sdmmc_drv_probe() 1416 host->power_state = SDMMC_POWER_OFF; rtsx_pci_sdmmc_drv_probe() 1417 INIT_WORK(&host->work, sd_request); rtsx_pci_sdmmc_drv_probe() 1418 platform_set_drvdata(pdev, host); rtsx_pci_sdmmc_drv_probe() 1422 mutex_init(&host->host_mutex); rtsx_pci_sdmmc_drv_probe() 1424 realtek_init_host(host); rtsx_pci_sdmmc_drv_probe() 1433 struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); rtsx_pci_sdmmc_drv_remove() local 1437 if (!host) rtsx_pci_sdmmc_drv_remove() 1440 pcr = host->pcr; rtsx_pci_sdmmc_drv_remove() 1443 mmc = host->mmc; rtsx_pci_sdmmc_drv_remove() 1445 cancel_work_sync(&host->work); rtsx_pci_sdmmc_drv_remove() 1447 mutex_lock(&host->host_mutex); rtsx_pci_sdmmc_drv_remove() 1448 if (host->mrq) { rtsx_pci_sdmmc_drv_remove() 1455 host->mrq->cmd->error = -ENOMEDIUM; rtsx_pci_sdmmc_drv_remove() 1456 if (host->mrq->stop) rtsx_pci_sdmmc_drv_remove() 1457 host->mrq->stop->error = -ENOMEDIUM; rtsx_pci_sdmmc_drv_remove() 1458 mmc_request_done(mmc, host->mrq); rtsx_pci_sdmmc_drv_remove() 1460 mutex_unlock(&host->host_mutex); rtsx_pci_sdmmc_drv_remove() 1463 host->eject = true; rtsx_pci_sdmmc_drv_remove() 1465 flush_workqueue(host->workq); rtsx_pci_sdmmc_drv_remove() 1466 destroy_workqueue(host->workq); rtsx_pci_sdmmc_drv_remove() 1467 host->workq = NULL; rtsx_pci_sdmmc_drv_remove()
|
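sd_search_final_phase() in the listing above condenses the RX tuning result into one sample point: each bit of phase_map marks a phase that passed the tuning command, and the driver reports "phase error" when the map is empty or otherwise picks the middle of the longest run of good phases. The helper below is an illustrative reimplementation of that idea, not the driver's actual code; the function name and the example map value are made up for the sketch.

/*
 * Illustrative sketch of the longest-run-of-good-phases search behind
 * sd_search_final_phase() above, treating the 32-bit map as a ring.
 */
#include <stdio.h>
#include <stdint.h>

static uint8_t pick_final_phase(uint32_t phase_map)
{
	int best_start = 0, best_len = 0;

	for (int start = 0; start < 32; start++) {
		int len = 0;

		/* walk the ring from 'start' while the bits stay set */
		while (len < 32 && (phase_map & (1u << ((start + len) % 32))))
			len++;
		if (len > best_len) {
			best_len = len;
			best_start = start;
		}
	}
	if (best_len == 0)
		return 0xff;		/* no usable phase at all */

	/* middle of the longest good window */
	return (uint8_t)((best_start + best_len / 2) % 32);
}

int main(void)
{
	uint32_t map = 0x00fffe00;	/* made-up example tuning result */

	printf("final phase = %u\n", (unsigned int)pick_final_phase(map));
	return 0;
}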
H A D | sunxi-mmc.c | 2 * Driver for sunxi SD/MMC host controllers 36 #include <linux/mmc/host.h> 74 #define mmc_readl(host, reg) \ 75 readl((host)->reg_base + SDXC_##reg) 76 #define mmc_writel(host, reg, value) \ 77 writel((value), (host)->reg_base + SDXC_##reg) 211 #define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */ 250 static int sunxi_mmc_reset_host(struct sunxi_mmc_host *host) sunxi_mmc_reset_host() argument 255 mmc_writel(host, REG_GCTRL, SDXC_HARDWARE_RESET); sunxi_mmc_reset_host() 257 rval = mmc_readl(host, REG_GCTRL); sunxi_mmc_reset_host() 261 dev_err(mmc_dev(host->mmc), "fatal err reset timeout\n"); sunxi_mmc_reset_host() 271 struct sunxi_mmc_host *host = mmc_priv(mmc); sunxi_mmc_init_host() local 273 if (sunxi_mmc_reset_host(host)) sunxi_mmc_init_host() 276 mmc_writel(host, REG_FTRGL, 0x20070008); sunxi_mmc_init_host() 277 mmc_writel(host, REG_TMOUT, 0xffffffff); sunxi_mmc_init_host() 278 mmc_writel(host, REG_IMASK, host->sdio_imask); sunxi_mmc_init_host() 279 mmc_writel(host, REG_RINTR, 0xffffffff); sunxi_mmc_init_host() 280 mmc_writel(host, REG_DBGC, 0xdeb); sunxi_mmc_init_host() 281 mmc_writel(host, REG_FUNS, SDXC_CEATA_ON); sunxi_mmc_init_host() 282 mmc_writel(host, REG_DLBA, host->sg_dma); sunxi_mmc_init_host() 284 rval = mmc_readl(host, REG_GCTRL); sunxi_mmc_init_host() 287 mmc_writel(host, REG_GCTRL, rval); sunxi_mmc_init_host() 292 static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host, sunxi_mmc_init_idma_des() argument 295 struct sunxi_idma_des *pdes = (struct sunxi_idma_des *)host->sg_cpu; sunxi_mmc_init_idma_des() 296 dma_addr_t next_desc = host->sg_dma; sunxi_mmc_init_idma_des() 297 int i, max_len = (1 << host->idma_des_size_bits); sunxi_mmc_init_idma_des() 333 static int sunxi_mmc_map_dma(struct sunxi_mmc_host *host, sunxi_mmc_map_dma() argument 339 dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, sunxi_mmc_map_dma() 342 dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n"); sunxi_mmc_map_dma() 348 dev_err(mmc_dev(host->mmc), sunxi_mmc_map_dma() 358 static void sunxi_mmc_start_dma(struct sunxi_mmc_host *host, sunxi_mmc_start_dma() argument 363 sunxi_mmc_init_idma_des(host, data); sunxi_mmc_start_dma() 365 rval = mmc_readl(host, REG_GCTRL); sunxi_mmc_start_dma() 367 mmc_writel(host, REG_GCTRL, rval); sunxi_mmc_start_dma() 369 mmc_writel(host, REG_GCTRL, rval); sunxi_mmc_start_dma() 371 mmc_writel(host, REG_DMAC, SDXC_IDMAC_SOFT_RESET); sunxi_mmc_start_dma() 374 mmc_writel(host, REG_IDIE, SDXC_IDMAC_RECEIVE_INTERRUPT); sunxi_mmc_start_dma() 376 mmc_writel(host, REG_DMAC, sunxi_mmc_start_dma() 380 static void sunxi_mmc_send_manual_stop(struct sunxi_mmc_host *host, sunxi_mmc_send_manual_stop() argument 398 mmc_writel(host, REG_CARG, arg); sunxi_mmc_send_manual_stop() 399 mmc_writel(host, REG_CMDR, cmd_val); sunxi_mmc_send_manual_stop() 402 ri = mmc_readl(host, REG_RINTR); sunxi_mmc_send_manual_stop() 407 dev_err(mmc_dev(host->mmc), "send stop command failed\n"); sunxi_mmc_send_manual_stop() 412 req->stop->resp[0] = mmc_readl(host, REG_RESP0); sunxi_mmc_send_manual_stop() 415 mmc_writel(host, REG_RINTR, 0xffff); sunxi_mmc_send_manual_stop() 418 static void sunxi_mmc_dump_errinfo(struct sunxi_mmc_host *host) sunxi_mmc_dump_errinfo() argument 420 struct mmc_command *cmd = host->mrq->cmd; sunxi_mmc_dump_errinfo() 421 struct mmc_data *data = host->mrq->data; sunxi_mmc_dump_errinfo() 424 if ((host->int_sum & SDXC_INTERRUPT_ERROR_BIT) == sunxi_mmc_dump_errinfo() 429 dev_err(mmc_dev(host->mmc), sunxi_mmc_dump_errinfo() 431 
sunxi_mmc_dump_errinfo() logs the mmc index and command opcode, then decodes the error bits accumulated in host->int_sum: response error, response CRC error, data CRC error, response and data timeouts, FIFO run error, hardware lock, start-bit and end-bit errors.

sunxi_mmc_finalize_request() masks everything except the SDIO interrupt (REG_IMASK) and disables IDMA interrupts (REG_IDIE). On an error it dumps the error info and parks the request in host->manual_stop_mrq; otherwise it copies the response out of REG_RESP0..REG_RESP3 (all four words for a long response, one for a short one). For data transfers it acknowledges and stops the internal DMA controller (REG_IDST, REG_DMAC, REG_GCTRL toggles) and unmaps the scatterlist. It then clears pending interrupts (REG_RINTR = 0xffff), resets host->mrq, int_sum and wait_dma, and returns IRQ_WAKE_THREAD if a manual stop is pending, IRQ_HANDLED otherwise.

sunxi_mmc_irq() is the hard interrupt handler: under host->lock it reads the IDMA status (REG_IDST) and masked interrupt status (REG_MISTA), accumulates host->int_sum, and treats the request as finished when a response timeout arrives without command-done, when any error bit is set, or when the done bits are set and no DMA completion is still awaited. It acknowledges both status registers, runs sunxi_mmc_finalize_request() when finished, and after dropping the lock calls mmc_request_done() and, for SDIO, mmc_signal_sdio_irq(). sunxi_mmc_handle_manual_stop() is the threaded half: it fetches host->manual_stop_mrq under the lock, sends the stop command with sunxi_mmc_send_manual_stop() after the data error, clears the field and completes the request.

sunxi_mmc_oclk_onoff() gates or ungates the card clock in REG_CLKCR, issues a clock-update command through REG_CMDR and polls it until it completes, clearing the SDIO bit from REG_RINTR; on timeout it logs "fatal err update clk timeout". sunxi_mmc_clk_set_rate() rounds the requested rate against clk_mmc, sets it, gates the output clock, updates REG_CLKCR, applies the sample and output clock phases with clk_set_phase(), and re-enables the output clock.

sunxi_mmc_set_ios() re-initializes the host on power-up (or resets it on power-off), programs the bus width in REG_WIDTH (1/4/8 bit), updates REG_GCTRL according to the timing and applies the clock through sunxi_mmc_clk_set_rate(). sunxi_mmc_enable_sdio_irq() toggles SDXC_SDIO_INTERRUPT in REG_IMASK under the lock; sunxi_mmc_hw_reset() pulses REG_HWRST.

sunxi_mmc_request() fails fast on host->ferror, maps the data scatterlist, rejects a new request while host->mrq or host->manual_stop_mrq is still outstanding, programs REG_BLKSZ/REG_BCNTR, starts DMA, records the request, unmasks the needed interrupts and writes the argument and command registers.

sunxi_mmc_resource_request() selects the IDMA descriptor size, fetches the regulators, maps the register window, acquires the "ahb", "mmc", "output" and "sample" clocks plus the optional "ahb" reset control, prepares and enables the clocks, deasserts the reset, resets the host and installs the threaded interrupt handler pair (sunxi_mmc_irq / sunxi_mmc_handle_manual_stop); the error path unwinds the reset and clocks in reverse order. sunxi_mmc_probe() allocates the mmc host, initializes the spinlock, requests the resources, allocates one coherent page for DMA descriptors and derives max_seg_size from the descriptor size. sunxi_mmc_remove() disables the interrupt, resets the host, asserts the reset, disables the clocks and frees the coherent buffer.
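The hard/threaded handler split above hinges on the return value of sunxi_mmc_finalize_request(). Below is a minimal, self-contained user-space model of just that decision; the struct, field names and enum are illustrative stand-ins rather than the kernel API, and the real handlers of course also do the locking, register I/O and DMA teardown.

/* Model of the IRQ_WAKE_THREAD decision: the hard handler finalizes the
 * request and asks for the threaded handler only when a manual stop
 * command is still owed to the card. All names are placeholders. */
#include <stdbool.h>
#include <stdio.h>

enum irqreturn { IRQ_HANDLED, IRQ_WAKE_THREAD };

struct fake_host {
    bool error_seen;        /* an error bit was set in int_sum */
    void *mrq;              /* request currently in flight */
    void *manual_stop_mrq;  /* request that still needs a stop command */
};

static enum irqreturn finalize_request(struct fake_host *h)
{
    if (h->error_seen)
        h->manual_stop_mrq = h->mrq;   /* defer the stop to the thread */
    h->mrq = NULL;
    return h->manual_stop_mrq ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

int main(void)
{
    struct fake_host h = { .error_seen = true, .mrq = &h };

    printf("hard handler returned %s\n",
           finalize_request(&h) == IRQ_WAKE_THREAD ?
           "IRQ_WAKE_THREAD" : "IRQ_HANDLED");
    return 0;
}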
|
H A D | jz4740_mmc.c |
Excerpt from the Ingenic JZ4740 MMC/SD host driver.

jz4740_mmc_acquire_dma_channels() and jz4740_mmc_release_dma_channels() request and release the TX and RX dmaengine channels and seed next_data.cookie with 1; jz4740_mmc_get_dma_chan() picks the channel from the data direction. jz4740_mmc_prepare_dma_data() validates data->host_cookie against next_data.cookie (warning on a mismatch), maps the scatterlist and stores sg_len; jz4740_mmc_start_dma_transfer() fills a dma_slave_config pointing at the TX/RX FIFO, prepares and submits a slave_sg descriptor, and unmaps on failure. jz4740_mmc_pre_request()/jz4740_mmc_post_request() are the asynchronous prepare/unprepare pair built on the same cookie.

jz4740_mmc_set_irq_enabled() keeps a shadow interrupt mask under host->lock and writes it to JZ_REG_MMC_IMASK. Clock start/stop and controller reset go through JZ_REG_MMC_STRPCL with status polling. jz4740_mmc_poll_irq() polls JZ_REG_MMC_IREG with a bounded loop; when the budget runs out it sets the waiting bit, re-arms the 5-second timeout timer and enables the interrupt instead. jz4740_mmc_transfer_check_state() turns timeout/CRC status bits into -ETIMEDOUT or -EIO on the current command. jz4740_mmc_write_data() and jz4740_mmc_read_data() are the PIO paths, walking the request with an sg_mapping_iter and the TX/RX FIFO. jz4740_mmc_timeout() fires when the waiting bit was never cleared, disables the end-of-command interrupt, flags -ETIMEDOUT and completes the request.

jz4740_mmc_send_command() stops the clock, programs BLKLEN/NOB for data commands, writes CMD/ARG/CMDAT and restarts the clock; jz4740_mmc_read_response() pulls the response out of the response FIFO. jz_mmc_irq_worker() is the threaded handler, a state machine (read response, transfer data via DMA or PIO, send stop, done) that ends in jz4740_mmc_request_done(). jz_mmc_irq() is the hard handler: it masks off disabled bits, acknowledges the SDIO interrupt, deletes the timeout timer for the in-flight command, records errors from the status register, disables and acknowledges the remaining bits and wakes the worker.

jz4740_mmc_set_clock_rate() stops the clock, sets the parent clock and derives the divider written to JZ_REG_MMC_CLKRT. jz4740_mmc_request() stores the request, clears IREG, enables the end-of-command interrupt, sets the read-response state, arms the 5-second timer and sends the command. jz4740_mmc_set_ios() handles power (reset, power GPIO, INIT flag, clock enable/disable) and 1-/4-bit bus width; jz4740_mmc_enable_sdio_irq() toggles the SDIO interrupt. The probe path gets the IRQ, the "mmc" clock and the register resource, requests the GPIO pins, initializes the lock and irq_mask (0xffff), installs the threaded IRQ, resets the controller, disables the clock, sets up the timeout timer (with HZ of slack) and tries DMA before falling back to PIO. Remove deletes the timer, masks and resets, removes the host, frees the IRQ, GPIOs and DMA channels; suspend/resume suspend and resume the GPIO pins.
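jz4740_mmc_set_irq_enabled() relies on a shadow copy of the interrupt mask that is updated under the host lock and then written out in one register access. A tiny user-space sketch of that pattern, with made-up register output and without the locking, looks like this:

/* Shadow-mask pattern: clear a bit to unmask an interrupt, set it to
 * mask it, then push the whole mask to IMASK in one write.
 * printf stands in for writew(); the bit value below is invented. */
#include <stdint.h>
#include <stdio.h>

static uint16_t irq_mask = 0xffff;   /* power-on state: everything masked */

static void set_irq_enabled(uint16_t irq, int enabled)
{
    if (enabled)
        irq_mask &= ~irq;   /* bit cleared => interrupt unmasked */
    else
        irq_mask |= irq;    /* bit set     => interrupt masked   */
    printf("IMASK <- 0x%04x\n", irq_mask);
}

int main(void)
{
    set_irq_enabled(1u << 2, 1);   /* unmask a hypothetical bit 2 */
    set_irq_enabled(1u << 2, 0);   /* mask it again */
    return 0;
}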
|
H A D | sdhci-of-esdhc.c |
Excerpt from the Freescale eSDHC (big-endian SDHCI) glue.

esdhc_readl/readw/readb() and esdhc_writel/writew/writeb() adapt the byte- and word-sized SDHCI register accesses to 32-bit big-endian MMIO (in_be32 plus shifting, and the sdhci_be32bs_* helpers), special-casing the slot interrupt status, the DMA-select bits and, when SDHCI_QUIRK2_BROKEN_HOST_CONTROL is set, skipping writes to the host-control register. esdhci_of_adma_workaround() recovers from an ADMA error by clearing data->error, recomputing the DMA address from bytes_xfered and restarting the transfer via SDHCI_DMA_ADDRESS. esdhc_of_enable_dma() sets the snoop bit in ESDHC_DMA_SYSCTL; the min/max clock callbacks come from the platform clock.

esdhc_of_set_clock() clears the clock bits in ESDHC_SYSTEM_CONTROL, searches a power-of-two prescaler (up to 256) and a linear divisor (up to 16) so that max_clk/pre_div/div no longer exceeds the target, logs the desired versus actual rate and writes the dividers back. esdhc_of_platform_init() reads the vendor version from the slot interrupt status and adjusts quirks accordingly (SDHCI_QUIRK2_HOST_NO_CMD23, clearing SDHCI_QUIRK_NO_BUSY_IRQ). esdhc_pltfm_set_bus_width() updates SDHCI_HOST_CONTROL with clrsetbits_be32(). esdhc_reset() calls sdhci_reset() and restores the interrupt enable and signal registers from host->ier. Suspend saves the host-control register (esdhc_proctl) before sdhci_suspend_host(); resume runs sdhci_resume_host(), re-enables DMA snooping and writes the saved value back. sdhci_esdhc_probe() does sdhci_pltfm_init() with sdhci_esdhc_pdata, applies the broken-host-control quirk where needed, parses the device tree (mmc_of_parse(), mmc_of_parse_voltage() into ocr_mask) and adds the host.
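The two-stage divider search in esdhc_of_set_clock() can be exercised on its own. The sketch below assumes the usual eSDHC increments (doubling pre_div, incrementing div), since only the loop conditions appear in the excerpt, and the clock numbers are invented:

/* Divider search: raise the power-of-two prescaler until max_clk/pre_div/16
 * no longer exceeds the target, then raise the linear divisor until the
 * resulting clock drops to or below the target. */
#include <stdio.h>

int main(void)
{
    unsigned int max_clk = 100000000;   /* hypothetical base clock, Hz  */
    unsigned int clock   = 400000;      /* hypothetical target rate, Hz */
    unsigned int pre_div = 2, div = 1;

    while (max_clk / pre_div / 16 > clock && pre_div < 256)
        pre_div *= 2;                   /* assumed step */
    while (max_clk / pre_div / div > clock && div < 16)
        div++;                          /* assumed step */

    printf("desired SD clock: %u, actual: %u (pre_div=%u div=%u)\n",
           clock, max_clk / pre_div / div, pre_div, div);
    return 0;
}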
|
H A D | sdhci-bcm-kona.c |
Excerpt from the Broadcom Kona SDHCI glue.

sdhci_bcm_kona_sd_reset() sets the top-level reset bit in KONA_SDHOST_CORECTRL, polls until the controller reflects it (logging "sd host is stuck in reset!!!" on timeout), then clears the bit to bring the host back out of reset. sdhci_bcm_kona_sd_init() unmasks the core interrupts (KONA_SDHOST_COREIMR) and enables AHB clock gating in CORECTRL. sdhci_bcm_kona_sd_card_emulate() updates KONA_SDHOST_CORESTAT under a lock to report card insertion or removal (and the write-protect state from mmc_gpio_get_ro()) so the standard SDHCI card-detect path fires; sdhci_bcm_kona_card_event() calls it with 1 or 0 depending on mmc_gpio_get_cd(). sdhci_bcm_kona_init_74_clocks() is also provided as a host hook.

sdhci_bcm_kona_probe() does sdhci_pltfm_init() with the Kona platform data, parses the device tree, checks f_max and sets the core clock to it, logs whether the slot is non-removable and whether CD/WP GPIOs are present, marks non-removable slots with SDHCI_QUIRK_BROKEN_CARD_DETECTION, resets and initializes the core, adds the host, and finally emulates card insertion for non-removable or already-present cards; the error path removes the host and re-asserts the reset.
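sdhci_bcm_kona_sd_reset() is a bounded poll: set the reset bit, wait until the controller reflects it, then clear it again. A user-space model with a simulated register follows; the bit position and retry budget are placeholders, not the real hardware values.

/* Bounded poll on a reset bit, with the MMIO register simulated by a
 * plain variable so the example runs anywhere. */
#include <stdio.h>

#define KONA_SDHOST_RESET  (1u << 0)   /* illustrative bit position */
#define MAX_TRIES          100         /* illustrative retry budget */

static unsigned int corectrl;          /* stands in for the MMIO register */

static unsigned int rd(void)           { return corectrl; }
static void         wr(unsigned int v) { corectrl = v; }

int main(void)
{
    int tries = 0;

    wr(rd() | KONA_SDHOST_RESET);          /* request the reset */
    while (!(rd() & KONA_SDHOST_RESET)) {  /* wait for it to latch */
        if (++tries > MAX_TRIES) {
            fprintf(stderr, "Error: sd host is stuck in reset!!!\n");
            return 1;
        }
    }
    wr(rd() & ~KONA_SDHOST_RESET);         /* bring the host back out */
    printf("reset pulse completed after %d polls\n", tries);
    return 0;
}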
|
H A D | tifm_sd.c |
Excerpt from the TI FlashMedia (tifm) SD/MMC host driver.

tifm_sd_read_fifo() and tifm_sd_write_fifo() do PIO through a 16-bit data FIFO (a comment notes the host will not respond correctly to readw/writew); an odd trailing byte is parked in bounce_buf_data[0] with the DATA_CARRY flag and merged with the first byte of the next chunk. tifm_sd_transfer_data() walks the scatterlist page by page (nth_page/PAGE_SHIFT arithmetic) and flushes a final carried byte when the list ends. tifm_sd_bounce_block() and tifm_sd_set_dma_data() form the DMA path: whole blocks are fed straight from the scatterlist, while a block that straddles a segment boundary is assembled in a bounce buffer first, with DATA_CARRY marking the pending bounce.

tifm_sd_exec() writes the command argument and opcode, honouring open-drain bus mode. tifm_sd_check_status() is a small state machine over cmd_flags (CMD_READY, SCMD_ACTIVE/SCMD_READY, BRS_READY, FIFO_READY, CARD_BUSY) that decides when to issue the stop command and when to schedule the finish tasklet. tifm_sd_data_event() and tifm_sd_card_event() are the socket event handlers: they read the FIFO and host status, update cmd_flags, fetch responses, drive the PIO transfer, track card-busy and re-run tifm_sd_check_status().

tifm_sd_set_data_timeout() derives the data timeout from the request and the current clock. tifm_sd_request() refuses requests after eject or while one is active, resets the per-request state, chooses DMA or PIO (no_dma), maps either the real scatterlist or a single bounce buffer, stores the request, arms the abort timer and issues the command. tifm_sd_end_cmd() (the finish tasklet) deletes the timer, detaches host->req, unmaps the buffers and completes the request; tifm_sd_abort() (the timer handler) logs the stuck opcode and flags and ejects the socket. tifm_sd_ios() selects the 20 MHz or 24 MHz base clock and the divider and applies open-drain mode; tifm_sd_ro() reads the write-protect state from the socket. tifm_sd_initialize_host() programs a default divider (61) and powers the interface. The probe path allocates the mmc host, records the socket, sets a 1000 ms request timeout, initializes the finish tasklet and the abort timer and initializes the hardware; remove flags eject, kills the tasklet and fails any outstanding request with -ENOMEDIUM; resume re-initializes the host or flags eject on failure.
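The DATA_CARRY handling above deals with a 16-bit-only FIFO fed from byte-granular scatterlist segments. The following user-space sketch reproduces just the packing logic, with printf standing in for the FIFO write and invented test data:

/* Pack a byte stream into 16-bit FIFO words; an odd trailing byte is
 * parked in a one-byte carry buffer and completed by the next call. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static bool    carry;        /* DATA_CARRY */
static uint8_t carry_byte;   /* bounce_buf_data[0] */

static void write_fifo(const uint8_t *buf, size_t cnt)
{
    size_t pos = 0;
    uint16_t val;

    if (carry && cnt) {                       /* finish the split word */
        val = carry_byte | ((uint16_t)buf[pos++] << 8);
        carry = false;
        cnt--;
        printf("FIFO <- 0x%04x\n", val);
    }
    while (cnt >= 2) {                        /* whole 16-bit words */
        val = buf[pos] | ((uint16_t)buf[pos + 1] << 8);
        pos += 2;
        cnt -= 2;
        printf("FIFO <- 0x%04x\n", val);
    }
    if (cnt) {                                /* odd byte left over */
        carry_byte = buf[pos];
        carry = true;
    }
}

int main(void)
{
    const uint8_t a[] = { 0x11, 0x22, 0x33 };   /* odd-length segment */
    const uint8_t b[] = { 0x44, 0x55 };

    write_fifo(a, sizeof(a));
    write_fifo(b, sizeof(b));
    return 0;
}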
|
H A D | dw_mmc-pci.c |
Excerpt from the DesignWare MMC PCI glue: dw_mci_pci_probe() allocates a struct dw_mci with devm_kzalloc(), fills in the PCI interrupt (pdev->irq, IRQF_SHARED), the device pointer and the static pci_board_data, maps the register BAR through pcim_iomap_table(pdev)[PCI_BAR_NO], calls dw_mci_probe() and stores the host with pci_set_drvdata(). Remove, suspend and resume are thin wrappers around dw_mci_remove(), dw_mci_suspend() and dw_mci_resume().
|
H A D | dw_mmc-k3.c |
Excerpt from the K3 SoC DesignWare MMC glue: dw_mci_k3_set_ios() sets the CIU clock to the requested rate (warning on failure) and refreshes host->bus_hz from clk_get_rate(). Suspend calls dw_mci_suspend() and then disables the CIU clock; resume re-enables the clock (logging an error on failure) before dw_mci_resume().
|
H A D | dw_mmc-pltfm.c |
Excerpt from the DesignWare MMC platform-device glue: dw_mci_pltfm_prepare_command() is a stub hook. dw_mci_pltfm_register() allocates a struct dw_mci with devm_kzalloc(), takes the platform IRQ (propagating a negative return), fills in drv_data, dev, irq_flags and the platform data, maps the register resource with devm_ioremap_resource() (propagating PTR_ERR() on failure), sets the driver data and calls dw_mci_probe(). Suspend, resume and remove wrap dw_mci_suspend(), dw_mci_resume() and dw_mci_remove().
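dw_mci_pltfm_register(), like most of the probe paths in this directory, propagates failures from devm_ioremap_resource() with IS_ERR()/PTR_ERR(). As a reminder of what that convention encodes, here is a simplified user-space restatement; it is not the kernel's err.h, just the same idea in miniature:

/* Encode small negative errno values in the top of the address range so
 * one pointer return can carry either a valid object or an error code. */
#include <stdio.h>

#define MAX_ERRNO 4095

static void *err_ptr(long error)    { return (void *)error; }
static long  ptr_err(const void *p) { return (long)p; }
static int   is_err(const void *p)
{
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *get_resource(int fail)
{
    static int resource;

    if (fail)
        return err_ptr(-12);   /* -ENOMEM, hard-coded for the demo */
    return &resource;
}

int main(void)
{
    void *res = get_resource(1);

    if (is_err(res))
        printf("probe would return %ld\n", ptr_err(res));
    return 0;
}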
|
H A D | sdhci-spear.c |
Excerpt from drivers/mmc/host/sdhci-spear.c (ST SPEAr SDHCI glue): sdhci_probe_config_dt() reads the card-detect GPIO from the device tree into sdhci->card_int_gpio. sdhci_probe() allocates the SDHCI host with room for the spear_sdhci private data, maps the register resource, fills in hw_name ("sdhci"), the generic sdhci_pltfm_ops, the platform IRQ and SDHCI_QUIRK_BROKEN_ADMA, requests the card-detect GPIO with mmc_gpio_request_cd() when one was configured, adds the host and stores it as driver data; the error path frees the host. sdhci_remove() samples SDHCI_INT_STATUS to decide whether the controller is dead before sdhci_remove_host() and sdhci_free_host(). Suspend and resume wrap sdhci_suspend_host() and sdhci_resume_host(), using the spear_sdhci private data.
|
H A D | rtsx_usb_sdmmc.c |
Excerpt from the Realtek USB card reader (rtsx_usb) SD/MMC host driver.

Small helpers return the parent device (sdmmc_dev()), clear bridge errors (sd_clear_error()) and, in debug builds, dump SD_STAT1/SD_STAT2/SD_BUS_STAT (sd_print_debug_regs()). sd_read_data() and sd_write_data() build a register sequence over the USB transport, issue the SD command and move a small buffer, logging heavily and cleaning up on error. sd_send_cmd_get_rsp() sends a command and parses the response into cmd->resp[], rejecting an invalid response start bit or a CRC7 error. sd_rw_multi() streams multi-block data with rtsx_usb_transfer_data(), clearing errors on failure. sd_enable_initial_mode() and sd_disable_initial_mode() toggle the slow initialization mode in SD_CFG1, and sd_normal_rw() uses them around byte-mode reads and writes through a bounce buffer.

Tuning: sd_change_phase() programs a TX or RX sample point, sd_wait_data_idle() polls until the data lines go idle, sd_tuning_rx_cmd() tries the tuning command at one sample point, sd_tuning_phase() collects a 16-point bitmap, and sd_tuning_rx() fixes the TX phase, combines several raw bitmaps into one phase map, logs it, picks the final phase with sd_search_final_phase() and applies it.

sdmmc_get_ro() and sdmmc_get_cd() query the write-protect and card-present state over USB (short-circuiting after host removal) and cache card_exist. sdmmc_request() refuses requests when the reader is gone or no card is present, publishes host->mrq under host_mutex, dispatches to sd_send_cmd_get_rsp(), sd_rw_multi() or sd_normal_rw() as appropriate, then clears host->mrq and completes the request. sd_set_bus_width(), sd_power_on(), sd_power_off(), sd_set_power_mode() (with pm_runtime get/put around power transitions) and sd_set_timing() program the bridge; sdmmc_set_ios() applies power mode, bus width and timing, chooses the SSC depth, initial-mode, double-clock and vpclk options from the requested clock and calls rtsx_usb_switch_clock(). sdmmc_switch_voltage(), sdmmc_card_busy() and sdmmc_execute_tuning() (RX tuning only outside DDR mode) complete the host ops.

LED support schedules a work item from rtsx_usb_led_control() and switches the LED over USB in rtsx_usb_update_led(). rtsx_usb_init_host() sets the capabilities and starts with MMC_POWER_OFF. The probe path allocates the mmc host, wires up the ucr/mmc/pdev pointers, initializes host_mutex, initializes the host and registers the LED class device and its work item; remove sets host_removal, fails any outstanding request with -ENOMEDIUM under host_mutex, cancels the LED work and unregisters the LED.
|
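The rtsx_usb_sdmmc tuning path above builds a bitmap of SD sample points that returned clean tuning data and then lets sd_search_final_phase() pick the middle of the longest good window. A minimal user-space sketch of that selection step, assuming a 16-bit phase map and ignoring the real driver's exact tie-breaking:

#include <stdio.h>

#define NUM_PHASES 16

static int final_phase(unsigned int map)
{
        int best_start = -1, best_len = 0;
        int i;

        if (map == 0)
                return -1;              /* no sample point worked */

        for (i = 0; i < NUM_PHASES; i++) {
                int len = 0;

                /* length of the run of good phases starting at i, wrapping */
                while (len < NUM_PHASES &&
                       (map & (1u << ((i + len) % NUM_PHASES))))
                        len++;

                if (len > best_len) {
                        best_len = len;
                        best_start = i;
                }
        }

        /* middle of the longest window, modulo the phase count */
        return (best_start + best_len / 2) % NUM_PHASES;
}

int main(void)
{
        unsigned int map = 0x0ff0;      /* phases 4..11 pass */

        printf("final phase = %d\n", final_phase(map)); /* prints 8 */
        return 0;
}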
H A D | mmc_spi.c | 37 #include <linux/mmc/host.h> 164 static inline int mmc_cs_off(struct mmc_spi_host *host) mmc_cs_off() argument 167 return spi_setup(host->spi); mmc_cs_off() 171 mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len) mmc_spi_readbytes() argument 175 if (len > sizeof(*host->data)) { mmc_spi_readbytes() 180 host->status.len = len; mmc_spi_readbytes() 182 if (host->dma_dev) mmc_spi_readbytes() 183 dma_sync_single_for_device(host->dma_dev, mmc_spi_readbytes() 184 host->data_dma, sizeof(*host->data), mmc_spi_readbytes() 187 status = spi_sync_locked(host->spi, &host->readback); mmc_spi_readbytes() 189 if (host->dma_dev) mmc_spi_readbytes() 190 dma_sync_single_for_cpu(host->dma_dev, mmc_spi_readbytes() 191 host->data_dma, sizeof(*host->data), mmc_spi_readbytes() 197 static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout, mmc_spi_skip() argument 200 u8 *cp = host->data->status; mmc_spi_skip() 207 status = mmc_spi_readbytes(host, n); mmc_spi_skip() 230 mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout) mmc_spi_wait_unbusy() argument 232 return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0); mmc_spi_wait_unbusy() 235 static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout) mmc_spi_readtoken() argument 237 return mmc_spi_skip(host, timeout, 1, 0xff); mmc_spi_readtoken() 262 static int mmc_spi_response_get(struct mmc_spi_host *host, mmc_spi_response_get() argument 265 u8 *cp = host->data->status; mmc_spi_response_get() 266 u8 *end = cp + host->t.len; mmc_spi_response_get() 289 cp = host->data->status; mmc_spi_response_get() 304 value = mmc_spi_readbytes(host, 1); mmc_spi_response_get() 321 value = mmc_spi_readbytes(host, 1); mmc_spi_response_get() 324 cp = host->data->status; mmc_spi_response_get() 364 mmc_spi_wait_unbusy(host, r1b_timeout); mmc_spi_response_get() 373 value = mmc_spi_readbytes(host, 1); mmc_spi_response_get() 376 cp = host->data->status; mmc_spi_response_get() 396 value = mmc_spi_readbytes(host, 1); mmc_spi_response_get() 399 cp = host->data->status; mmc_spi_response_get() 417 dev_dbg(&host->spi->dev, "bad response type %04x\n", mmc_spi_response_get() 425 dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n", mmc_spi_response_get() 434 mmc_cs_off(host); mmc_spi_response_get() 445 mmc_spi_command_send(struct mmc_spi_host *host, mmc_spi_command_send() argument 449 struct scratch *data = host->data; mmc_spi_command_send() 523 dev_dbg(&host->spi->dev, " mmc_spi: CMD%d, resp %s\n", mmc_spi_command_send() 527 spi_message_init(&host->m); mmc_spi_command_send() 529 t = &host->t; mmc_spi_command_send() 532 t->tx_dma = t->rx_dma = host->data_dma; mmc_spi_command_send() 535 spi_message_add_tail(t, &host->m); mmc_spi_command_send() 537 if (host->dma_dev) { mmc_spi_command_send() 538 host->m.is_dma_mapped = 1; mmc_spi_command_send() 539 dma_sync_single_for_device(host->dma_dev, mmc_spi_command_send() 540 host->data_dma, sizeof(*host->data), mmc_spi_command_send() 543 status = spi_sync_locked(host->spi, &host->m); mmc_spi_command_send() 545 if (host->dma_dev) mmc_spi_command_send() 546 dma_sync_single_for_cpu(host->dma_dev, mmc_spi_command_send() 547 host->data_dma, sizeof(*host->data), mmc_spi_command_send() 550 dev_dbg(&host->spi->dev, " ... 
write returned %d\n", status); mmc_spi_command_send() 556 return mmc_spi_response_get(host, cmd, cs_on); mmc_spi_command_send() 572 struct mmc_spi_host *host, mmc_spi_setup_data_message() 577 struct scratch *scratch = host->data; mmc_spi_setup_data_message() 578 dma_addr_t dma = host->data_dma; mmc_spi_setup_data_message() 580 spi_message_init(&host->m); mmc_spi_setup_data_message() 582 host->m.is_dma_mapped = 1; mmc_spi_setup_data_message() 588 t = &host->token; mmc_spi_setup_data_message() 598 spi_message_add_tail(t, &host->m); mmc_spi_setup_data_message() 604 t = &host->t; mmc_spi_setup_data_message() 606 t->tx_buf = host->ones; mmc_spi_setup_data_message() 607 t->tx_dma = host->ones_dma; mmc_spi_setup_data_message() 609 spi_message_add_tail(t, &host->m); mmc_spi_setup_data_message() 611 t = &host->crc; mmc_spi_setup_data_message() 620 t->tx_buf = host->ones; mmc_spi_setup_data_message() 621 t->tx_dma = host->ones_dma; mmc_spi_setup_data_message() 626 spi_message_add_tail(t, &host->m); mmc_spi_setup_data_message() 643 t = &host->early_status; mmc_spi_setup_data_message() 648 t->tx_buf = host->ones; mmc_spi_setup_data_message() 649 t->tx_dma = host->ones_dma; mmc_spi_setup_data_message() 654 spi_message_add_tail(t, &host->m); mmc_spi_setup_data_message() 671 mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t, mmc_spi_writeblock() argument 674 struct spi_device *spi = host->spi; mmc_spi_writeblock() 676 struct scratch *scratch = host->data; mmc_spi_writeblock() 679 if (host->mmc->use_spi_crc) mmc_spi_writeblock() 682 if (host->dma_dev) mmc_spi_writeblock() 683 dma_sync_single_for_device(host->dma_dev, mmc_spi_writeblock() 684 host->data_dma, sizeof(*scratch), mmc_spi_writeblock() 687 status = spi_sync_locked(spi, &host->m); mmc_spi_writeblock() 694 if (host->dma_dev) mmc_spi_writeblock() 695 dma_sync_single_for_cpu(host->dma_dev, mmc_spi_writeblock() 696 host->data_dma, sizeof(*scratch), mmc_spi_writeblock() 727 /* host shall then issue MMC_STOP_TRANSMISSION */ mmc_spi_writeblock() 731 /* host shall then issue MMC_STOP_TRANSMISSION, mmc_spi_writeblock() 747 if (host->dma_dev) mmc_spi_writeblock() 758 return mmc_spi_wait_unbusy(host, timeout); mmc_spi_writeblock() 778 mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t, mmc_spi_readblock() argument 781 struct spi_device *spi = host->spi; mmc_spi_readblock() 783 struct scratch *scratch = host->data; mmc_spi_readblock() 790 status = mmc_spi_readbytes(host, 1); mmc_spi_readblock() 795 status = mmc_spi_readtoken(host, timeout); mmc_spi_readblock() 812 if (host->dma_dev) { mmc_spi_readblock() 813 dma_sync_single_for_device(host->dma_dev, mmc_spi_readblock() 814 host->data_dma, sizeof(*scratch), mmc_spi_readblock() 816 dma_sync_single_for_device(host->dma_dev, mmc_spi_readblock() 821 status = spi_sync_locked(spi, &host->m); mmc_spi_readblock() 823 if (host->dma_dev) { mmc_spi_readblock() 824 dma_sync_single_for_cpu(host->dma_dev, mmc_spi_readblock() 825 host->data_dma, sizeof(*scratch), mmc_spi_readblock() 827 dma_sync_single_for_cpu(host->dma_dev, mmc_spi_readblock() 853 if (host->mmc->use_spi_crc) { mmc_spi_readblock() 866 if (host->dma_dev) mmc_spi_readblock() 878 mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd, mmc_spi_data_do() argument 881 struct spi_device *spi = host->spi; mmc_spi_data_do() 882 struct device *dma_dev = host->dma_dev; mmc_spi_data_do() 895 mmc_spi_setup_data_message(host, multiple, direction); mmc_spi_data_do() 896 t = &host->t; mmc_spi_data_do() 945 
dev_dbg(&host->spi->dev, mmc_spi_data_do() 953 status = mmc_spi_writeblock(host, t, timeout); mmc_spi_data_do() 955 status = mmc_spi_readblock(host, t, timeout); mmc_spi_data_do() 990 struct scratch *scratch = host->data; mmc_spi_data_do() 1001 INIT_LIST_HEAD(&host->m.transfers); mmc_spi_data_do() 1002 list_add(&host->early_status.transfer_list, mmc_spi_data_do() 1003 &host->m.transfers); mmc_spi_data_do() 1008 host->early_status.tx_buf = host->early_status.rx_buf; mmc_spi_data_do() 1009 host->early_status.tx_dma = host->early_status.rx_dma; mmc_spi_data_do() 1010 host->early_status.len = statlen; mmc_spi_data_do() 1012 if (host->dma_dev) mmc_spi_data_do() 1013 dma_sync_single_for_device(host->dma_dev, mmc_spi_data_do() 1014 host->data_dma, sizeof(*scratch), mmc_spi_data_do() 1017 tmp = spi_sync_locked(spi, &host->m); mmc_spi_data_do() 1019 if (host->dma_dev) mmc_spi_data_do() 1020 dma_sync_single_for_cpu(host->dma_dev, mmc_spi_data_do() 1021 host->data_dma, sizeof(*scratch), mmc_spi_data_do() 1038 tmp = mmc_spi_wait_unbusy(host, timeout); mmc_spi_data_do() 1052 struct mmc_spi_host *host = mmc_priv(mmc); mmc_spi_request() local 1065 dev_dbg(&host->spi->dev, "bogus command\n"); mmc_spi_request() 1072 dev_dbg(&host->spi->dev, "bogus STOP command\n"); mmc_spi_request() 1079 mmc_request_done(host->mmc, mrq); mmc_spi_request() 1086 spi_bus_lock(host->spi->master); mmc_spi_request() 1090 status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL); mmc_spi_request() 1092 mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz); mmc_spi_request() 1105 status = mmc_spi_command_send(host, mrq, &stop, 0); mmc_spi_request() 1112 status = mmc_spi_command_send(host, mrq, mrq->stop, 0); mmc_spi_request() 1114 mmc_cs_off(host); mmc_spi_request() 1118 spi_bus_unlock(host->spi->master); mmc_spi_request() 1120 mmc_request_done(host->mmc, mrq); mmc_spi_request() 1131 static void mmc_spi_initsequence(struct mmc_spi_host *host) mmc_spi_initsequence() argument 1136 mmc_spi_wait_unbusy(host, r1b_timeout); mmc_spi_initsequence() 1137 mmc_spi_readbytes(host, 10); mmc_spi_initsequence() 1153 host->spi->mode |= SPI_CS_HIGH; mmc_spi_initsequence() 1154 if (spi_setup(host->spi) != 0) { mmc_spi_initsequence() 1156 dev_warn(&host->spi->dev, mmc_spi_initsequence() 1158 host->spi->mode &= ~SPI_CS_HIGH; mmc_spi_initsequence() 1160 mmc_spi_readbytes(host, 18); mmc_spi_initsequence() 1162 host->spi->mode &= ~SPI_CS_HIGH; mmc_spi_initsequence() 1163 if (spi_setup(host->spi) != 0) { mmc_spi_initsequence() 1165 dev_err(&host->spi->dev, mmc_spi_initsequence() 1183 struct mmc_spi_host *host = mmc_priv(mmc); mmc_spi_set_ios() local 1185 if (host->power_mode != ios->power_mode) { mmc_spi_set_ios() 1188 canpower = host->pdata && host->pdata->setpower; mmc_spi_set_ios() 1190 dev_dbg(&host->spi->dev, "mmc_spi: power %s (%d)%s\n", mmc_spi_set_ios() 1202 host->pdata->setpower(&host->spi->dev, mmc_spi_set_ios() 1205 msleep(host->powerup_msecs); mmc_spi_set_ios() 1211 mmc_spi_initsequence(host); mmc_spi_set_ios() 1226 host->spi->mode &= ~(SPI_CPOL|SPI_CPHA); mmc_spi_set_ios() 1227 mres = spi_setup(host->spi); mmc_spi_set_ios() 1229 dev_dbg(&host->spi->dev, mmc_spi_set_ios() 1232 if (spi_write(host->spi, &nullbyte, 1) < 0) mmc_spi_set_ios() 1233 dev_dbg(&host->spi->dev, mmc_spi_set_ios() 1247 host->spi->mode |= (SPI_CPOL|SPI_CPHA); mmc_spi_set_ios() 1248 mres = spi_setup(host->spi); mmc_spi_set_ios() 1250 dev_dbg(&host->spi->dev, mmc_spi_set_ios() 1256 host->power_mode = ios->power_mode; mmc_spi_set_ios() 1259 if 
(host->spi->max_speed_hz != ios->clock && ios->clock != 0) { mmc_spi_set_ios() 1262 host->spi->max_speed_hz = ios->clock; mmc_spi_set_ios() 1263 status = spi_setup(host->spi); mmc_spi_set_ios() 1264 dev_dbg(&host->spi->dev, mmc_spi_set_ios() 1266 host->spi->max_speed_hz, status); mmc_spi_set_ios() 1287 struct mmc_spi_host *host = mmc_priv(mmc); mmc_spi_detect_irq() local 1288 u16 delay_msec = max(host->pdata->detect_delay, (u16)100); mmc_spi_detect_irq() 1298 struct mmc_spi_host *host; mmc_spi_probe() local 1338 mmc = mmc_alloc_host(sizeof(*host), &spi->dev); mmc_spi_probe() 1361 host = mmc_priv(mmc); mmc_spi_probe() 1362 host->mmc = mmc; mmc_spi_probe() 1363 host->spi = spi; mmc_spi_probe() 1365 host->ones = ones; mmc_spi_probe() 1370 host->pdata = mmc_spi_get_pdata(spi); mmc_spi_probe() 1371 if (host->pdata) mmc_spi_probe() 1372 mmc->ocr_avail = host->pdata->ocr_mask; mmc_spi_probe() 1377 if (host->pdata && host->pdata->setpower) { mmc_spi_probe() 1378 host->powerup_msecs = host->pdata->powerup_msecs; mmc_spi_probe() 1379 if (!host->powerup_msecs || host->powerup_msecs > 250) mmc_spi_probe() 1380 host->powerup_msecs = 250; mmc_spi_probe() 1386 host->data = kmalloc(sizeof(*host->data), GFP_KERNEL); mmc_spi_probe() 1387 if (!host->data) mmc_spi_probe() 1393 host->dma_dev = dev; mmc_spi_probe() 1394 host->ones_dma = dma_map_single(dev, ones, mmc_spi_probe() 1396 host->data_dma = dma_map_single(dev, host->data, mmc_spi_probe() 1397 sizeof(*host->data), DMA_BIDIRECTIONAL); mmc_spi_probe() 1401 dma_sync_single_for_cpu(host->dma_dev, mmc_spi_probe() 1402 host->data_dma, sizeof(*host->data), mmc_spi_probe() 1407 spi_message_init(&host->readback); mmc_spi_probe() 1408 host->readback.is_dma_mapped = (host->dma_dev != NULL); mmc_spi_probe() 1410 spi_message_add_tail(&host->status, &host->readback); mmc_spi_probe() 1411 host->status.tx_buf = host->ones; mmc_spi_probe() 1412 host->status.tx_dma = host->ones_dma; mmc_spi_probe() 1413 host->status.rx_buf = &host->data->status; mmc_spi_probe() 1414 host->status.rx_dma = host->data_dma + offsetof(struct scratch, status); mmc_spi_probe() 1415 host->status.cs_change = 1; mmc_spi_probe() 1418 if (host->pdata && host->pdata->init) { mmc_spi_probe() 1419 status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc); mmc_spi_probe() 1425 if (host->pdata) { mmc_spi_probe() 1426 mmc->caps |= host->pdata->caps; mmc_spi_probe() 1427 mmc->caps2 |= host->pdata->caps2; mmc_spi_probe() 1434 if (host->pdata && host->pdata->flags & MMC_SPI_USE_CD_GPIO) { mmc_spi_probe() 1435 status = mmc_gpio_request_cd(mmc, host->pdata->cd_gpio, mmc_spi_probe() 1436 host->pdata->cd_debounce); mmc_spi_probe() 1448 if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) { mmc_spi_probe() 1450 status = mmc_gpio_request_ro(mmc, host->pdata->ro_gpio); mmc_spi_probe() 1455 dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n", mmc_spi_probe() 1457 host->dma_dev ? 
"" : ", no DMA", mmc_spi_probe() 1459 (host->pdata && host->pdata->setpower) mmc_spi_probe() 1468 if (host->dma_dev) mmc_spi_probe() 1469 dma_unmap_single(host->dma_dev, host->data_dma, mmc_spi_probe() 1470 sizeof(*host->data), DMA_BIDIRECTIONAL); mmc_spi_probe() 1471 kfree(host->data); mmc_spi_probe() 1487 struct mmc_spi_host *host; mmc_spi_remove() local 1490 host = mmc_priv(mmc); mmc_spi_remove() 1493 if (host->pdata && host->pdata->exit) mmc_spi_remove() 1494 host->pdata->exit(&spi->dev, mmc); mmc_spi_remove() 1498 if (host->dma_dev) { mmc_spi_remove() 1499 dma_unmap_single(host->dma_dev, host->ones_dma, mmc_spi_remove() 1501 dma_unmap_single(host->dma_dev, host->data_dma, mmc_spi_remove() 1502 sizeof(*host->data), DMA_BIDIRECTIONAL); mmc_spi_remove() 1505 kfree(host->data); mmc_spi_remove() 1506 kfree(host->ones); mmc_spi_remove() 1535 MODULE_DESCRIPTION("SPI SD/MMC host driver"); 571 mmc_spi_setup_data_message( struct mmc_spi_host *host, int multiple, enum dma_data_direction direction) mmc_spi_setup_data_message() argument
|
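mmc_spi polls the card by clocking out 0xFF and scanning the returned bytes: mmc_spi_readtoken() skips bytes equal to 0xFF until a token appears, and mmc_spi_wait_unbusy() skips 0x00 bytes while the card holds the line busy. A self-contained sketch of the token wait, with spi_xfer_byte() standing in (hypothetically) for the scratch-buffer transfers the real driver queues with spi_message:

#include <stdio.h>
#include <stdint.h>

/* Stub full-duplex SPI exchange: replays a canned response so the example
 * runs anywhere; on hardware this would clock 'tx' out and return MISO. */
static const uint8_t card_bytes[] = { 0xff, 0xff, 0xff, 0xfe, 0xaa };
static unsigned int card_pos;

static uint8_t spi_xfer_byte(uint8_t tx)
{
        (void)tx;                       /* host keeps sending 0xff while polling */
        return card_bytes[card_pos < sizeof(card_bytes) ?
                          card_pos++ : sizeof(card_bytes) - 1];
}

/* Wait for a data token: any byte that is not the all-ones idle pattern. */
static int wait_for_token(unsigned int max_polls, uint8_t *token)
{
        while (max_polls--) {
                uint8_t rx = spi_xfer_byte(0xff);

                if (rx != 0xff) {
                        *token = rx;
                        return 0;
                }
        }
        return -1;                      /* timed out */
}

int main(void)
{
        uint8_t token;

        if (wait_for_token(8, &token) == 0)
                printf("token 0x%02x\n", token);        /* 0xfe: single-block start */
        return 0;
}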
H A D | sdhci-pltfm.c | 39 unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host) sdhci_pltfm_clk_get_max_clock() argument 41 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_pltfm_clk_get_max_clock() 72 struct sdhci_host *host = platform_get_drvdata(pdev); sdhci_get_of_property() local 73 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_get_of_property() 79 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; sdhci_get_of_property() 84 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; sdhci_get_of_property() 87 host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; sdhci_get_of_property() 90 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; sdhci_get_of_property() 93 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; sdhci_get_of_property() 96 host->quirks |= SDHCI_QUIRK_BROKEN_DMA; sdhci_get_of_property() 102 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; sdhci_get_of_property() 109 host->mmc->pm_caps |= MMC_PM_KEEP_POWER; sdhci_get_of_property() 112 host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; sdhci_get_of_property() 123 struct sdhci_host *host; sdhci_pltfm_init() local 136 host = sdhci_alloc_host(&pdev->dev, sdhci_pltfm_init() 139 if (IS_ERR(host)) { sdhci_pltfm_init() 140 ret = PTR_ERR(host); sdhci_pltfm_init() 144 host->hw_name = dev_name(&pdev->dev); sdhci_pltfm_init() 146 host->ops = pdata->ops; sdhci_pltfm_init() 148 host->ops = &sdhci_pltfm_ops; sdhci_pltfm_init() 150 host->quirks = pdata->quirks; sdhci_pltfm_init() 151 host->quirks2 = pdata->quirks2; sdhci_pltfm_init() 154 host->irq = platform_get_irq(pdev, 0); sdhci_pltfm_init() 157 mmc_hostname(host->mmc))) { sdhci_pltfm_init() 163 host->ioaddr = ioremap(iomem->start, resource_size(iomem)); sdhci_pltfm_init() 164 if (!host->ioaddr) { sdhci_pltfm_init() 174 if (host->ops && host->ops->platform_init) sdhci_pltfm_init() 175 host->ops->platform_init(host); sdhci_pltfm_init() 177 platform_set_drvdata(pdev, host); sdhci_pltfm_init() 179 return host; sdhci_pltfm_init() 184 sdhci_free_host(host); sdhci_pltfm_init() 193 struct sdhci_host *host = platform_get_drvdata(pdev); sdhci_pltfm_free() local 196 iounmap(host->ioaddr); sdhci_pltfm_free() 198 sdhci_free_host(host); sdhci_pltfm_free() 206 struct sdhci_host *host; sdhci_pltfm_register() local 209 host = sdhci_pltfm_init(pdev, pdata, priv_size); sdhci_pltfm_register() 210 if (IS_ERR(host)) sdhci_pltfm_register() 211 return PTR_ERR(host); sdhci_pltfm_register() 215 ret = sdhci_add_host(host); sdhci_pltfm_register() 225 struct sdhci_host *host = platform_get_drvdata(pdev); sdhci_pltfm_unregister() local 226 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_pltfm_unregister() 227 int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); sdhci_pltfm_unregister() 229 sdhci_remove_host(host, dead); sdhci_pltfm_unregister() 240 struct sdhci_host *host = dev_get_drvdata(dev); sdhci_pltfm_suspend() local 242 return sdhci_suspend_host(host); sdhci_pltfm_suspend() 248 struct sdhci_host *host = dev_get_drvdata(dev); sdhci_pltfm_resume() local 250 return sdhci_resume_host(host); sdhci_pltfm_resume()
|
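sdhci-pltfm exposes sdhci_pltfm_register()/sdhci_pltfm_unregister() so simple platform glue drivers need little more than a probe and remove. A minimal sketch of such a glue driver, with a made-up compatible string and an arbitrary quirk chosen only for illustration:

#include <linux/module.h>
#include <linux/of.h>
#include "sdhci-pltfm.h"

static const struct sdhci_pltfm_data example_pdata = {
        .quirks = SDHCI_QUIRK_BROKEN_DMA,       /* example: force PIO */
};

static int example_sdhci_probe(struct platform_device *pdev)
{
        /* allocates the host, ioremaps the first MEM resource, applies
         * the DT quirks and calls sdhci_add_host() */
        return sdhci_pltfm_register(pdev, &example_pdata, 0);
}

static int example_sdhci_remove(struct platform_device *pdev)
{
        return sdhci_pltfm_unregister(pdev);
}

static const struct of_device_id example_sdhci_of_match[] = {
        { .compatible = "example,sdhci" },      /* hypothetical binding */
        { }
};
MODULE_DEVICE_TABLE(of, example_sdhci_of_match);

static struct platform_driver example_sdhci_driver = {
        .driver = {
                .name = "sdhci-example",
                .of_match_table = example_sdhci_of_match,
        },
        .probe = example_sdhci_probe,
        .remove = example_sdhci_remove,
};
module_platform_driver(example_sdhci_driver);

MODULE_LICENSE("GPL");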
H A D | tmio_mmc.h | 2 * linux/drivers/mmc/host/tmio_mmc.h 48 void (*enable)(struct tmio_mmc_host *host, bool enable); 59 void (*set_pwr)(struct platform_device *host, int state); 60 void (*set_clk_div)(struct platform_device *host, int state); 91 spinlock_t lock; /* protect host private data */ 97 int (*write16_hook)(struct tmio_mmc_host *host, int addr); 105 void tmio_mmc_host_free(struct tmio_mmc_host *host); 106 int tmio_mmc_host_probe(struct tmio_mmc_host *host, 108 void tmio_mmc_host_remove(struct tmio_mmc_host *host); 109 void tmio_mmc_do_data_irq(struct tmio_mmc_host *host); 111 void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i); 112 void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); 133 void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data); 134 void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable); 135 void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); 136 void tmio_mmc_release_dma(struct tmio_mmc_host *host); 137 void tmio_mmc_abort_dma(struct tmio_mmc_host *host); 139 static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host, tmio_mmc_start_dma() argument 144 static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) tmio_mmc_enable_dma() argument 148 static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host, tmio_mmc_request_dma() argument 151 host->chan_tx = NULL; tmio_mmc_request_dma() 152 host->chan_rx = NULL; tmio_mmc_request_dma() 155 static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host) tmio_mmc_release_dma() argument 159 static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host) tmio_mmc_abort_dma() argument 169 static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) sd_ctrl_read16() argument 171 return readw(host->ctl + (addr << host->bus_shift)); sd_ctrl_read16() 174 static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, sd_ctrl_read16_rep() argument 177 readsw(host->ctl + (addr << host->bus_shift), buf, count); sd_ctrl_read16_rep() 180 static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) sd_ctrl_read32() argument 182 return readw(host->ctl + (addr << host->bus_shift)) | sd_ctrl_read32() 183 readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; sd_ctrl_read32() 186 static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) sd_ctrl_write16() argument 191 if (host->write16_hook && host->write16_hook(host, addr)) sd_ctrl_write16() 193 writew(val, host->ctl + (addr << host->bus_shift)); sd_ctrl_write16() 196 static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, sd_ctrl_write16_rep() argument 199 writesw(host->ctl + (addr << host->bus_shift), buf, count); sd_ctrl_write16_rep() 202 static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) sd_ctrl_write32() argument 204 writew(val, host->ctl + (addr << host->bus_shift)); sd_ctrl_write32() 205 writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); sd_ctrl_write32()
|
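The tmio accessors place 16-bit register index addr at byte offset (addr << host->bus_shift), and sd_ctrl_read32() stitches a 32-bit value out of two adjacent 16-bit reads. The arithmetic, reduced to a runnable user-space example with a fake register file:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for readw(): index a fake register file so the example runs
 * anywhere. */
static uint16_t regs[8] = { 0x5678, 0x1234 };   /* low word, high word */

static uint16_t fake_readw(unsigned int byte_off)
{
        return regs[byte_off / 2];
}

/* Mirror of sd_ctrl_read32(): a 32-bit register is two adjacent 16-bit
 * registers, each placed at (index << bus_shift) bytes from the base. */
static uint32_t ctrl_read32(unsigned int addr, unsigned int bus_shift)
{
        return fake_readw(addr << bus_shift) |
               (uint32_t)fake_readw((addr + 2) << bus_shift) << 16;
}

int main(void)
{
        /* with bus_shift == 0 the two halves sit at byte offsets 0 and 2 */
        printf("0x%08x\n", ctrl_read32(0, 0));  /* prints 0x12345678 */
        return 0;
}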
H A D | dw_mmc-exynos.c | 15 #include <linux/mmc/host.h> 79 static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host) dw_mci_exynos_get_ciu_div() argument 81 struct dw_mci_exynos_priv_data *priv = host->priv; dw_mci_exynos_get_ciu_div() 89 return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL64)) + 1; dw_mci_exynos_get_ciu_div() 91 return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL)) + 1; dw_mci_exynos_get_ciu_div() 94 static int dw_mci_exynos_priv_init(struct dw_mci *host) dw_mci_exynos_priv_init() argument 96 struct dw_mci_exynos_priv_data *priv = host->priv; dw_mci_exynos_priv_init() 100 mci_writel(host, MPSBEGIN0, 0); dw_mci_exynos_priv_init() 101 mci_writel(host, MPSEND0, SDMMC_ENDING_SEC_NR_MAX); dw_mci_exynos_priv_init() 102 mci_writel(host, MPSCTRL0, SDMMC_MPSCTRL_SECURE_WRITE_BIT | dw_mci_exynos_priv_init() 109 priv->saved_strobe_ctrl = mci_readl(host, HS400_DLINE_CTRL); dw_mci_exynos_priv_init() 110 priv->saved_dqs_en = mci_readl(host, HS400_DQS_EN); dw_mci_exynos_priv_init() 112 mci_writel(host, HS400_DQS_EN, priv->saved_dqs_en); dw_mci_exynos_priv_init() 121 static int dw_mci_exynos_setup_clock(struct dw_mci *host) dw_mci_exynos_setup_clock() argument 123 struct dw_mci_exynos_priv_data *priv = host->priv; dw_mci_exynos_setup_clock() 125 host->bus_hz /= (priv->ciu_div + 1); dw_mci_exynos_setup_clock() 130 static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing) dw_mci_exynos_set_clksel_timing() argument 132 struct dw_mci_exynos_priv_data *priv = host->priv; dw_mci_exynos_set_clksel_timing() 137 clksel = mci_readl(host, CLKSEL64); dw_mci_exynos_set_clksel_timing() 139 clksel = mci_readl(host, CLKSEL); dw_mci_exynos_set_clksel_timing() 145 mci_writel(host, CLKSEL64, clksel); dw_mci_exynos_set_clksel_timing() 147 mci_writel(host, CLKSEL, clksel); dw_mci_exynos_set_clksel_timing() 153 struct dw_mci *host = dev_get_drvdata(dev); dw_mci_exynos_suspend() local 155 return dw_mci_suspend(host); dw_mci_exynos_suspend() 160 struct dw_mci *host = dev_get_drvdata(dev); dw_mci_exynos_resume() local 162 dw_mci_exynos_priv_init(host); dw_mci_exynos_resume() 163 return dw_mci_resume(host); dw_mci_exynos_resume() 179 struct dw_mci *host = dev_get_drvdata(dev); dw_mci_exynos_resume_noirq() local 180 struct dw_mci_exynos_priv_data *priv = host->priv; dw_mci_exynos_resume_noirq() 185 clksel = mci_readl(host, CLKSEL64); dw_mci_exynos_resume_noirq() 187 clksel = mci_readl(host, CLKSEL); dw_mci_exynos_resume_noirq() 192 mci_writel(host, CLKSEL64, clksel); dw_mci_exynos_resume_noirq() 194 mci_writel(host, CLKSEL, clksel); dw_mci_exynos_resume_noirq() 205 static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr) dw_mci_exynos_prepare_command() argument 207 struct dw_mci_exynos_priv_data *priv = host->priv; dw_mci_exynos_prepare_command() 217 if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL64))) dw_mci_exynos_prepare_command() 220 if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL))) dw_mci_exynos_prepare_command() 225 static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing) dw_mci_exynos_config_hs400() argument 227 struct dw_mci_exynos_priv_data *priv = host->priv; dw_mci_exynos_config_hs400() 247 mci_writel(host, HS400_DQS_EN, dqs); dw_mci_exynos_config_hs400() 248 mci_writel(host, HS400_DLINE_CTRL, strobe); dw_mci_exynos_config_hs400() 251 static void dw_mci_exynos_adjust_clock(struct dw_mci *host, unsigned int wanted) dw_mci_exynos_adjust_clock() argument 253 struct dw_mci_exynos_priv_data *priv = host->priv; dw_mci_exynos_adjust_clock() 261 if 
(!wanted || IS_ERR(host->ciu_clk)) dw_mci_exynos_adjust_clock() 271 div = dw_mci_exynos_get_ciu_div(host); dw_mci_exynos_adjust_clock() 272 ret = clk_set_rate(host->ciu_clk, wanted * div); dw_mci_exynos_adjust_clock() 274 dev_warn(host->dev, dw_mci_exynos_adjust_clock() 277 actual = clk_get_rate(host->ciu_clk); dw_mci_exynos_adjust_clock() 278 host->bus_hz = actual / div; dw_mci_exynos_adjust_clock() 280 host->current_speed = 0; dw_mci_exynos_adjust_clock() 283 static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios) dw_mci_exynos_set_ios() argument 285 struct dw_mci_exynos_priv_data *priv = host->priv; dw_mci_exynos_set_ios() 307 dw_mci_exynos_set_clksel_timing(host, clksel); dw_mci_exynos_set_ios() 310 dw_mci_exynos_config_hs400(host, timing); dw_mci_exynos_set_ios() 313 dw_mci_exynos_adjust_clock(host, wanted); dw_mci_exynos_set_ios() 316 static int dw_mci_exynos_parse_dt(struct dw_mci *host) dw_mci_exynos_parse_dt() argument 319 struct device_node *np = host->dev->of_node; dw_mci_exynos_parse_dt() 325 priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); dw_mci_exynos_parse_dt() 361 dev_dbg(host->dev, dw_mci_exynos_parse_dt() 366 host->priv = priv; dw_mci_exynos_parse_dt() 370 static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host) dw_mci_exynos_get_clksmpl() argument 372 struct dw_mci_exynos_priv_data *priv = host->priv; dw_mci_exynos_get_clksmpl() 376 return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL64)); dw_mci_exynos_get_clksmpl() 378 return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL)); dw_mci_exynos_get_clksmpl() 381 static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample) dw_mci_exynos_set_clksmpl() argument 384 struct dw_mci_exynos_priv_data *priv = host->priv; dw_mci_exynos_set_clksmpl() 388 clksel = mci_readl(host, CLKSEL64); dw_mci_exynos_set_clksmpl() 390 clksel = mci_readl(host, CLKSEL); dw_mci_exynos_set_clksmpl() 394 mci_writel(host, CLKSEL64, clksel); dw_mci_exynos_set_clksmpl() 396 mci_writel(host, CLKSEL, clksel); dw_mci_exynos_set_clksmpl() 399 static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host) dw_mci_exynos_move_next_clksmpl() argument 401 struct dw_mci_exynos_priv_data *priv = host->priv; dw_mci_exynos_move_next_clksmpl() 407 clksel = mci_readl(host, CLKSEL64); dw_mci_exynos_move_next_clksmpl() 409 clksel = mci_readl(host, CLKSEL); dw_mci_exynos_move_next_clksmpl() 416 mci_writel(host, CLKSEL64, clksel); dw_mci_exynos_move_next_clksmpl() 418 mci_writel(host, CLKSEL, clksel); dw_mci_exynos_move_next_clksmpl() 451 struct dw_mci *host = slot->host; dw_mci_exynos_execute_tuning() local 452 struct dw_mci_exynos_priv_data *priv = host->priv; dw_mci_exynos_execute_tuning() 458 start_smpl = dw_mci_exynos_get_clksmpl(host); dw_mci_exynos_execute_tuning() 461 mci_writel(host, TMOUT, ~0); dw_mci_exynos_execute_tuning() 462 smpl = dw_mci_exynos_move_next_clksmpl(host); dw_mci_exynos_execute_tuning() 471 dw_mci_exynos_set_clksmpl(host, found); dw_mci_exynos_execute_tuning() 480 static int dw_mci_exynos_prepare_hs400_tuning(struct dw_mci *host, dw_mci_exynos_prepare_hs400_tuning() argument 483 struct dw_mci_exynos_priv_data *priv = host->priv; dw_mci_exynos_prepare_hs400_tuning() 485 dw_mci_exynos_set_clksel_timing(host, priv->hs400_timing); dw_mci_exynos_prepare_hs400_tuning() 486 dw_mci_exynos_adjust_clock(host, (ios->clock) << 1); dw_mci_exynos_prepare_hs400_tuning()
|
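dw_mci_exynos_adjust_clock() asks the clock framework for wanted * div, where div is the CLKSEL divider plus one, and then derives bus_hz from whatever rate was actually granted. The same arithmetic with plain numbers (the 198 MHz "actual" rate is just an example):

#include <stdio.h>

int main(void)
{
        unsigned int wanted = 50000000;         /* 50 MHz card clock */
        unsigned int div = 4;                   /* SDMMC_CLKSEL_GET_DIV() + 1 */
        unsigned int actual = 198000000;        /* what clk_set_rate() really delivered */

        printf("request ciu clk = %u Hz\n", wanted * div);      /* 200 MHz */
        printf("bus_hz          = %u Hz\n", actual / div);      /* 49.5 MHz */
        return 0;
}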
H A D | sdhci-acpi.c | 40 #include <linux/mmc/host.h> 74 struct sdhci_host *host; member in struct:sdhci_acpi_host 86 static int sdhci_acpi_enable_dma(struct sdhci_host *host) sdhci_acpi_enable_dma() argument 88 struct sdhci_acpi_host *c = sdhci_priv(host); sdhci_acpi_enable_dma() 95 if (host->flags & SDHCI_USE_64_BIT_DMA) { sdhci_acpi_enable_dma() 96 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) { sdhci_acpi_enable_dma() 97 host->flags &= ~SDHCI_USE_64_BIT_DMA; sdhci_acpi_enable_dma() 113 static void sdhci_acpi_int_hw_reset(struct sdhci_host *host) sdhci_acpi_int_hw_reset() argument 117 reg = sdhci_readb(host, SDHCI_POWER_CONTROL); sdhci_acpi_int_hw_reset() 119 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); sdhci_acpi_int_hw_reset() 123 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); sdhci_acpi_int_hw_reset() 153 struct sdhci_host *host; sdhci_acpi_emmc_probe_slot() local 155 if (!c || !c->host) sdhci_acpi_emmc_probe_slot() 158 host = c->host; sdhci_acpi_emmc_probe_slot() 163 sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 && sdhci_acpi_emmc_probe_slot() 164 sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807) sdhci_acpi_emmc_probe_slot() 165 host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */ sdhci_acpi_emmc_probe_slot() 174 struct sdhci_host *host; sdhci_acpi_sdio_probe_slot() local 176 if (!c || !c->host) sdhci_acpi_sdio_probe_slot() 179 host = c->host; sdhci_acpi_sdio_probe_slot() 190 struct sdhci_host *host; sdhci_acpi_sd_probe_slot() local 192 if (!c || !c->host || !c->slot) sdhci_acpi_sd_probe_slot() 195 host = c->host; sdhci_acpi_sd_probe_slot() 288 struct sdhci_host *host; sdhci_acpi_probe() local 315 host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host)); sdhci_acpi_probe() 316 if (IS_ERR(host)) sdhci_acpi_probe() 317 return PTR_ERR(host); sdhci_acpi_probe() 319 c = sdhci_priv(host); sdhci_acpi_probe() 320 c->host = host; sdhci_acpi_probe() 327 host->hw_name = "ACPI"; sdhci_acpi_probe() 328 host->ops = &sdhci_acpi_ops_dflt; sdhci_acpi_probe() 329 host->irq = platform_get_irq(pdev, 0); sdhci_acpi_probe() 331 host->ioaddr = devm_ioremap_nocache(dev, iomem->start, sdhci_acpi_probe() 333 if (host->ioaddr == NULL) { sdhci_acpi_probe() 345 host->ops = c->slot->chip->ops; sdhci_acpi_probe() 346 host->quirks |= c->slot->chip->quirks; sdhci_acpi_probe() 347 host->quirks2 |= c->slot->chip->quirks2; sdhci_acpi_probe() 348 host->mmc->caps |= c->slot->chip->caps; sdhci_acpi_probe() 349 host->mmc->caps2 |= c->slot->chip->caps2; sdhci_acpi_probe() 350 host->mmc->pm_caps |= c->slot->chip->pm_caps; sdhci_acpi_probe() 352 host->quirks |= c->slot->quirks; sdhci_acpi_probe() 353 host->quirks2 |= c->slot->quirks2; sdhci_acpi_probe() 354 host->mmc->caps |= c->slot->caps; sdhci_acpi_probe() 355 host->mmc->caps2 |= c->slot->caps2; sdhci_acpi_probe() 356 host->mmc->pm_caps |= c->slot->pm_caps; sdhci_acpi_probe() 359 host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP; sdhci_acpi_probe() 364 if (mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL)) { sdhci_acpi_probe() 370 err = sdhci_add_host(host); sdhci_acpi_probe() 385 sdhci_free_host(c->host); sdhci_acpi_probe() 404 dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0); sdhci_acpi_remove() 405 sdhci_remove_host(c->host, dead); sdhci_acpi_remove() 406 sdhci_free_host(c->host); sdhci_acpi_remove() 417 return sdhci_suspend_host(c->host); sdhci_acpi_suspend() 424 return sdhci_resume_host(c->host); sdhci_acpi_resume() 440 return sdhci_runtime_suspend_host(c->host); sdhci_acpi_runtime_suspend() 447 return sdhci_runtime_resume_host(c->host); 
sdhci_acpi_runtime_resume()
|
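Several of the glue drivers in this listing share the same remove-path idiom: if SDHCI_INT_STATUS reads back as all ones, the controller is assumed gone and sdhci_remove_host() is told it is dead so it skips the orderly shutdown. Condensed into a sketch of a remove callback, leaving out driver-specific cleanup such as clocks and runtime PM:

static int example_glue_remove(struct platform_device *pdev)
{
        struct sdhci_host *host = platform_get_drvdata(pdev);
        /* all-ones usually means the device was unplugged or powered off */
        int dead = (sdhci_readl(host, SDHCI_INT_STATUS) == 0xffffffff);

        sdhci_remove_host(host, dead);
        sdhci_free_host(host);
        return 0;
}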
H A D | sdricoh_cs.c | 38 #include <linux/mmc/host.h> 102 static inline unsigned int sdricoh_readl(struct sdricoh_host *host, sdricoh_readl() argument 105 unsigned int value = readl(host->iobase + reg); sdricoh_readl() 106 dev_vdbg(host->dev, "rl %x 0x%x\n", reg, value); sdricoh_readl() 110 static inline void sdricoh_writel(struct sdricoh_host *host, unsigned int reg, sdricoh_writel() argument 113 writel(value, host->iobase + reg); sdricoh_writel() 114 dev_vdbg(host->dev, "wl %x 0x%x\n", reg, value); sdricoh_writel() 118 static inline unsigned int sdricoh_readw(struct sdricoh_host *host, sdricoh_readw() argument 121 unsigned int value = readw(host->iobase + reg); sdricoh_readw() 122 dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value); sdricoh_readw() 126 static inline void sdricoh_writew(struct sdricoh_host *host, unsigned int reg, sdricoh_writew() argument 129 writew(value, host->iobase + reg); sdricoh_writew() 130 dev_vdbg(host->dev, "ww %x 0x%x\n", reg, value); sdricoh_writew() 133 static inline unsigned int sdricoh_readb(struct sdricoh_host *host, sdricoh_readb() argument 136 unsigned int value = readb(host->iobase + reg); sdricoh_readb() 137 dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value); sdricoh_readb() 141 static int sdricoh_query_status(struct sdricoh_host *host, unsigned int wanted, sdricoh_query_status() argument 145 struct device *dev = host->dev; sdricoh_query_status() 147 status = sdricoh_readl(host, R21C_STATUS); sdricoh_query_status() 148 sdricoh_writel(host, R2E4_STATUS_RESP, status); sdricoh_query_status() 167 static int sdricoh_mmc_cmd(struct sdricoh_host *host, unsigned char opcode, sdricoh_mmc_cmd() argument 174 sdricoh_writel(host, R21C_STATUS, 0x18); sdricoh_mmc_cmd() 176 sdricoh_writel(host, R204_CMD_ARG, arg); sdricoh_mmc_cmd() 177 sdricoh_writel(host, R200_CMD, (0x10000 << 8) | opcode); sdricoh_mmc_cmd() 181 status = sdricoh_readl(host, R21C_STATUS); sdricoh_mmc_cmd() 182 sdricoh_writel(host, R2E4_STATUS_RESP, status); sdricoh_mmc_cmd() 198 static int sdricoh_reset(struct sdricoh_host *host) sdricoh_reset() argument 200 dev_dbg(host->dev, "reset\n"); sdricoh_reset() 201 sdricoh_writel(host, R2F0_RESET, 0x10001); sdricoh_reset() 202 sdricoh_writel(host, R2E0_INIT, 0x10000); sdricoh_reset() 203 if (sdricoh_readl(host, R2E0_INIT) != 0x10000) sdricoh_reset() 205 sdricoh_writel(host, R2E0_INIT, 0x10007); sdricoh_reset() 207 sdricoh_writel(host, R224_MODE, 0x2000000); sdricoh_reset() 208 sdricoh_writel(host, R228_POWER, 0xe0); sdricoh_reset() 212 sdricoh_writel(host, R21C_STATUS, 0x18); sdricoh_reset() 217 static int sdricoh_blockio(struct sdricoh_host *host, int read, sdricoh_blockio() argument 224 if (sdricoh_query_status(host, STATUS_READY_TO_READ, sdricoh_blockio() 227 sdricoh_writel(host, R21C_STATUS, 0x18); sdricoh_blockio() 230 data = sdricoh_readl(host, R230_DATA); sdricoh_blockio() 241 if (sdricoh_query_status(host, STATUS_READY_TO_WRITE, sdricoh_blockio() 244 sdricoh_writel(host, R21C_STATUS, 0x18); sdricoh_blockio() 255 sdricoh_writel(host, R230_DATA, data); sdricoh_blockio() 267 struct sdricoh_host *host = mmc_priv(mmc); sdricoh_request() local 270 struct device *dev = host->dev; sdricoh_request() 277 sdricoh_writel(host, R21C_STATUS, 0x18); sdricoh_request() 280 if (host->app_cmd) { sdricoh_request() 282 host->app_cmd = 0; sdricoh_request() 284 host->app_cmd = 1; sdricoh_request() 288 sdricoh_writew(host, R226_BLOCKSIZE, data->blksz); sdricoh_request() 289 sdricoh_writel(host, R208_DATAIO, 0); sdricoh_request() 292 cmd->error = sdricoh_mmc_cmd(host, opcode, 
cmd->arg); sdricoh_request() 300 sdricoh_readl(host, sdricoh_request() 304 sdricoh_readb(host, R20C_RESP + sdricoh_request() 308 cmd->resp[0] = sdricoh_readl(host, R20C_RESP); sdricoh_request() 318 sdricoh_writel(host, R21C_STATUS, 0x837f031e); sdricoh_request() 328 sdricoh_blockio(host, sdricoh_request() 341 sdricoh_writel(host, R208_DATAIO, 1); sdricoh_request() 343 if (sdricoh_query_status(host, STATUS_TRANSFER_FINISHED, sdricoh_request() 357 struct sdricoh_host *host = mmc_priv(mmc); sdricoh_set_ios() local 358 dev_dbg(host->dev, "set_ios\n"); sdricoh_set_ios() 361 sdricoh_writel(host, R228_POWER, 0xc0e0); sdricoh_set_ios() 364 sdricoh_writel(host, R224_MODE, 0x2000300); sdricoh_set_ios() 365 sdricoh_writel(host, R228_POWER, 0x40e0); sdricoh_set_ios() 367 sdricoh_writel(host, R224_MODE, 0x2000340); sdricoh_set_ios() 371 sdricoh_writel(host, R224_MODE, 0x2000320); sdricoh_set_ios() 372 sdricoh_writel(host, R228_POWER, 0xe0); sdricoh_set_ios() 378 struct sdricoh_host *host = mmc_priv(mmc); sdricoh_get_ro() local 381 status = sdricoh_readl(host, R21C_STATUS); sdricoh_get_ro() 382 sdricoh_writel(host, R2E4_STATUS_RESP, status); sdricoh_get_ro() 404 struct sdricoh_host *host = NULL; sdricoh_init_mmc() local 432 host = mmc_priv(mmc); sdricoh_init_mmc() 434 host->iobase = iobase; sdricoh_init_mmc() 435 host->dev = dev; sdricoh_init_mmc() 436 host->pci_dev = pci_dev; sdricoh_init_mmc() 451 if (sdricoh_reset(host)) { sdricoh_init_mmc() 461 dev_dbg(dev, "mmc host registered\n"); sdricoh_init_mmc() 504 /* remove mmc host */ sdricoh_pcmcia_detach() 506 struct sdricoh_host *host = mmc_priv(mmc); sdricoh_pcmcia_detach() local 508 pci_iounmap(host->pci_dev, host->iobase); sdricoh_pcmcia_detach() 509 pci_dev_put(host->pci_dev); sdricoh_pcmcia_detach()
|
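sdricoh_blockio() moves data by polling the status register and then reading or writing the 32-bit data register, four payload bytes per access. A runnable sketch of the read direction; the LSB-first byte unpacking and the always-ready poll are assumptions made to keep the example self-contained:

#include <stdio.h>
#include <stdint.h>

/* Stand-ins so the sketch runs anywhere: one FIFO word and a status poll
 * that always reports ready. */
static int wait_ready(void) { return 0; }
static uint32_t read_data_word(void) { return 0x44434241; }     /* "ABCD" */

/* Shape of the polled read loop in sdricoh_blockio(): one 32-bit read of
 * the data register supplies four payload bytes (assumed LSB-first here). */
static int read_block(uint8_t *buf, unsigned int len)
{
        uint32_t data = 0;
        unsigned int i;

        for (i = 0; i < len; i++) {
                if ((i % 4) == 0) {
                        if (wait_ready())
                                return -1;
                        data = read_data_word();
                }
                buf[i] = data & 0xff;
                data >>= 8;
        }
        return 0;
}

int main(void)
{
        uint8_t buf[4];

        read_block(buf, sizeof(buf));
        printf("%c%c%c%c\n", buf[0], buf[1], buf[2], buf[3]);   /* ABCD */
        return 0;
}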
H A D | sdhci-cns3xxx.c | 17 #include <linux/mmc/host.h> 21 static unsigned int sdhci_cns3xxx_get_max_clk(struct sdhci_host *host) sdhci_cns3xxx_get_max_clk() argument 26 static void sdhci_cns3xxx_set_clock(struct sdhci_host *host, unsigned int clock) sdhci_cns3xxx_set_clock() argument 28 struct device *dev = mmc_dev(host->mmc); sdhci_cns3xxx_set_clock() 33 host->mmc->actual_clock = 0; sdhci_cns3xxx_set_clock() 35 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); sdhci_cns3xxx_set_clock() 40 while (host->max_clk / div > clock) { sdhci_cns3xxx_set_clock() 54 clock, host->max_clk / div); sdhci_cns3xxx_set_clock() 62 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); sdhci_cns3xxx_set_clock() 65 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) sdhci_cns3xxx_set_clock() 76 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); sdhci_cns3xxx_set_clock()
|
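The cns3xxx clock handler searches for a divider such that host->max_clk / div no longer exceeds the requested rate. The listing does not show how the divider is stepped, so this sketch assumes the usual SDHCI power-of-two scheme capped at 256:

#include <stdio.h>

int main(void)
{
        unsigned int max_clk = 150000000;       /* example controller clock */
        unsigned int clock = 25000000;          /* requested card clock */
        unsigned int div = 1;

        while (max_clk / div > clock && div < 256)
                div *= 2;

        printf("div = %u, actual = %u Hz\n", div, max_clk / div);
        return 0;
}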
H A D | sdhci-msm.c | 2 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver 67 static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll) msm_dll_poll_ck_out_en() argument 71 struct mmc_host *mmc = host->mmc; msm_dll_poll_ck_out_en() 74 ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) & msm_dll_poll_ck_out_en() 85 ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) & msm_dll_poll_ck_out_en() 92 static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase) msm_config_cm_dll_phase() argument 101 struct mmc_host *mmc = host->mmc; msm_config_cm_dll_phase() 103 spin_lock_irqsave(&host->lock, flags); msm_config_cm_dll_phase() 105 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); msm_config_cm_dll_phase() 108 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); msm_config_cm_dll_phase() 111 rc = msm_dll_poll_ck_out_en(host, 0); msm_config_cm_dll_phase() 119 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); msm_config_cm_dll_phase() 122 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); msm_config_cm_dll_phase() 125 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) msm_config_cm_dll_phase() 126 | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG); msm_config_cm_dll_phase() 129 rc = msm_dll_poll_ck_out_en(host, 1); msm_config_cm_dll_phase() 133 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); msm_config_cm_dll_phase() 136 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); msm_config_cm_dll_phase() 143 spin_unlock_irqrestore(&host->lock, flags); msm_config_cm_dll_phase() 157 static int msm_find_most_appropriate_phase(struct sdhci_host *host, msm_find_most_appropriate_phase() argument 166 struct mmc_host *mmc = host->mmc; msm_find_most_appropriate_phase() 259 static inline void msm_cm_dll_set_freq(struct sdhci_host *host) msm_cm_dll_set_freq() argument 264 if (host->clock <= 112000000) msm_cm_dll_set_freq() 266 else if (host->clock <= 125000000) msm_cm_dll_set_freq() 268 else if (host->clock <= 137000000) msm_cm_dll_set_freq() 270 else if (host->clock <= 150000000) msm_cm_dll_set_freq() 272 else if (host->clock <= 162000000) msm_cm_dll_set_freq() 274 else if (host->clock <= 175000000) msm_cm_dll_set_freq() 276 else if (host->clock <= 187000000) msm_cm_dll_set_freq() 278 else if (host->clock <= 200000000) msm_cm_dll_set_freq() 281 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); msm_cm_dll_set_freq() 284 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); msm_cm_dll_set_freq() 288 static int msm_init_cm_dll(struct sdhci_host *host) msm_init_cm_dll() argument 290 struct mmc_host *mmc = host->mmc; msm_init_cm_dll() 294 spin_lock_irqsave(&host->lock, flags); msm_init_cm_dll() 301 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) msm_init_cm_dll() 302 & ~CORE_CLK_PWRSAVE), host->ioaddr + CORE_VENDOR_SPEC); msm_init_cm_dll() 305 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) msm_init_cm_dll() 306 | CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG); msm_init_cm_dll() 309 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) msm_init_cm_dll() 310 | CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG); msm_init_cm_dll() 311 msm_cm_dll_set_freq(host); msm_init_cm_dll() 314 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) msm_init_cm_dll() 315 & ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG); msm_init_cm_dll() 318 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) msm_init_cm_dll() 319 & ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG); 
msm_init_cm_dll() 322 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) msm_init_cm_dll() 323 | CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG); msm_init_cm_dll() 326 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) msm_init_cm_dll() 327 | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG); msm_init_cm_dll() 330 while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) & msm_init_cm_dll() 336 spin_unlock_irqrestore(&host->lock, flags); msm_init_cm_dll() 342 spin_unlock_irqrestore(&host->lock, flags); msm_init_cm_dll() 346 static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode) sdhci_msm_execute_tuning() argument 351 struct mmc_host *mmc = host->mmc; sdhci_msm_execute_tuning() 352 struct mmc_ios ios = host->mmc->ios; sdhci_msm_execute_tuning() 358 if (host->clock <= 100 * 1000 * 1000 || sdhci_msm_execute_tuning() 365 rc = msm_init_cm_dll(host); sdhci_msm_execute_tuning() 372 rc = msm_config_cm_dll_phase(host, phase); sdhci_msm_execute_tuning() 386 rc = msm_find_most_appropriate_phase(host, tuned_phases, sdhci_msm_execute_tuning() 397 rc = msm_config_cm_dll_phase(host, phase); sdhci_msm_execute_tuning() 431 struct sdhci_host *host; sdhci_msm_probe() local 445 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0); sdhci_msm_probe() 446 if (IS_ERR(host)) sdhci_msm_probe() 447 return PTR_ERR(host); sdhci_msm_probe() 449 pltfm_host = sdhci_priv(host); sdhci_msm_probe() 451 msm_host->mmc = host->mmc; sdhci_msm_probe() 454 ret = mmc_of_parse(host->mmc); sdhci_msm_probe() 520 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; sdhci_msm_probe() 521 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE; sdhci_msm_probe() 523 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION)); sdhci_msm_probe() 540 caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES); sdhci_msm_probe() 542 writel_relaxed(caps, host->ioaddr + sdhci_msm_probe() 546 ret = sdhci_add_host(host); sdhci_msm_probe() 566 struct sdhci_host *host = platform_get_drvdata(pdev); sdhci_msm_remove() local 567 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_msm_remove() 569 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == sdhci_msm_remove() 572 sdhci_remove_host(host, dead); sdhci_msm_remove()
|
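msm_init_cm_dll() and msm_config_cm_dll_phase() repeat the same read-modify-write of CORE_DLL_CONFIG using the relaxed accessors the driver favours. A hypothetical helper that captures the pattern (the real driver open-codes each access):

static inline void msm_dll_rmw(struct sdhci_host *host, u32 clear, u32 set)
{
        /* read CORE_DLL_CONFIG, clear then set the requested bits, write back */
        u32 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);

        config &= ~clear;
        config |= set;
        writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
}

/* e.g. assert then deassert the DLL reset bit:
 *      msm_dll_rmw(host, 0, CORE_DLL_RST);
 *      msm_dll_rmw(host, CORE_DLL_RST, 0);
 */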
H A D | sdhci-s3c.c | 1 /* linux/drivers/mmc/host/sdhci-s3c.c 30 #include <linux/mmc/host.h> 39 * @host: The SDHCI host created 48 struct sdhci_host *host; member in struct:sdhci_s3c 65 * @sdhci_quirks: sdhci host specific quirks. 76 static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host) to_s3c() argument 78 return sdhci_priv(host); to_s3c() 83 * @host: The SDHCI host instance. 87 static unsigned int sdhci_s3c_get_max_clk(struct sdhci_host *host) sdhci_s3c_get_max_clk() argument 89 struct sdhci_s3c *ourhost = to_s3c(host); sdhci_s3c_get_max_clk() 150 * @host: The SDHCI host being changed 156 static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock) sdhci_s3c_set_clock() argument 158 struct sdhci_s3c *ourhost = to_s3c(host); sdhci_s3c_set_clock() 165 host->mmc->actual_clock = 0; sdhci_s3c_set_clock() 169 sdhci_set_clock(host, clock); sdhci_s3c_set_clock() 195 host->max_clk = ourhost->clk_rates[best_src]; sdhci_s3c_set_clock() 199 writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); sdhci_s3c_set_clock() 201 ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2); sdhci_s3c_set_clock() 204 writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2); sdhci_s3c_set_clock() 208 host->ioaddr + S3C64XX_SDHCI_CONTROL4); sdhci_s3c_set_clock() 210 ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2); sdhci_s3c_set_clock() 216 writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2); sdhci_s3c_set_clock() 222 writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL3); sdhci_s3c_set_clock() 224 sdhci_set_clock(host, clock); sdhci_s3c_set_clock() 229 * @host: The SDHCI host being queried 231 * To init mmc host properly a minimal clock value is needed. For high system 236 static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host) sdhci_s3c_get_min_clock() argument 238 struct sdhci_s3c *ourhost = to_s3c(host); sdhci_s3c_get_min_clock() 254 static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host) sdhci_cmu_get_max_clock() argument 256 struct sdhci_s3c *ourhost = to_s3c(host); sdhci_cmu_get_max_clock() 276 static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host) sdhci_cmu_get_min_clock() argument 278 struct sdhci_s3c *ourhost = to_s3c(host); sdhci_cmu_get_min_clock() 298 static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock) sdhci_cmu_set_clock() argument 300 struct sdhci_s3c *ourhost = to_s3c(host); sdhci_cmu_set_clock() 306 host->mmc->actual_clock = 0; sdhci_cmu_set_clock() 310 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); sdhci_cmu_set_clock() 314 sdhci_s3c_set_clock(host, clock); sdhci_cmu_set_clock() 317 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); sdhci_cmu_set_clock() 319 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); sdhci_cmu_set_clock() 321 spin_unlock_irq(&host->lock); sdhci_cmu_set_clock() 323 spin_lock_irq(&host->lock); sdhci_cmu_set_clock() 326 mmc_hostname(host->mmc), clock); sdhci_cmu_set_clock() 331 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); sdhci_cmu_set_clock() 335 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) sdhci_cmu_set_clock() 339 mmc_hostname(host->mmc)); sdhci_cmu_set_clock() 347 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); sdhci_cmu_set_clock() 352 * @host: The SDHCI host being queried 358 static void sdhci_s3c_set_bus_width(struct sdhci_host *host, int width) sdhci_s3c_set_bus_width() argument 362 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); sdhci_s3c_set_bus_width() 379 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); sdhci_s3c_set_bus_width() 393 struct sdhci_host *host, struct s3c_sdhci_platdata *pdata) sdhci_s3c_parse_dt() 423 
struct sdhci_host *host, struct s3c_sdhci_platdata *pdata) sdhci_s3c_parse_dt() 450 struct sdhci_host *host; sdhci_s3c_probe() local 466 host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c)); sdhci_s3c_probe() 467 if (IS_ERR(host)) { sdhci_s3c_probe() 469 return PTR_ERR(host); sdhci_s3c_probe() 471 sc = sdhci_priv(host); sdhci_s3c_probe() 480 ret = sdhci_s3c_parse_dt(&pdev->dev, host, pdata); sdhci_s3c_probe() 490 sc->host = host; sdhci_s3c_probe() 495 platform_set_drvdata(pdev, host); sdhci_s3c_probe() 529 host->ioaddr = devm_ioremap_resource(&pdev->dev, res); sdhci_s3c_probe() 530 if (IS_ERR(host->ioaddr)) { sdhci_s3c_probe() 531 ret = PTR_ERR(host->ioaddr); sdhci_s3c_probe() 539 host->hw_name = "samsung-hsmmc"; sdhci_s3c_probe() 540 host->ops = &sdhci_s3c_ops; sdhci_s3c_probe() 541 host->quirks = 0; sdhci_s3c_probe() 542 host->quirks2 = 0; sdhci_s3c_probe() 543 host->irq = irq; sdhci_s3c_probe() 546 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; sdhci_s3c_probe() 547 host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT; sdhci_s3c_probe() 549 host->quirks |= drv_data->sdhci_quirks; sdhci_s3c_probe() 557 host->quirks |= SDHCI_QUIRK_BROKEN_DMA; sdhci_s3c_probe() 564 host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ; sdhci_s3c_probe() 566 /* This host supports the Auto CMD12 */ sdhci_s3c_probe() 567 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; sdhci_s3c_probe() 570 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC; sdhci_s3c_probe() 574 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; sdhci_s3c_probe() 577 host->mmc->caps = MMC_CAP_NONREMOVABLE; sdhci_s3c_probe() 581 host->mmc->caps |= MMC_CAP_8_BIT_DATA; sdhci_s3c_probe() 583 host->mmc->caps |= MMC_CAP_4_BIT_DATA; sdhci_s3c_probe() 588 host->mmc->pm_caps |= pdata->pm_caps; sdhci_s3c_probe() 590 host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR | sdhci_s3c_probe() 594 host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; sdhci_s3c_probe() 606 /* It supports additional host capabilities if needed */ sdhci_s3c_probe() 608 host->mmc->caps |= pdata->host_caps; sdhci_s3c_probe() 611 host->mmc->caps2 |= pdata->host_caps2; sdhci_s3c_probe() 618 ret = mmc_of_parse(host->mmc); sdhci_s3c_probe() 622 ret = sdhci_add_host(host); sdhci_s3c_probe() 641 sdhci_free_host(host); sdhci_s3c_probe() 648 struct sdhci_host *host = platform_get_drvdata(pdev); sdhci_s3c_remove() local 649 struct sdhci_s3c *sc = sdhci_priv(host); sdhci_s3c_remove() 658 sdhci_remove_host(host, 1); sdhci_s3c_remove() 665 sdhci_free_host(host); sdhci_s3c_remove() 673 struct sdhci_host *host = dev_get_drvdata(dev); sdhci_s3c_suspend() local 675 return sdhci_suspend_host(host); sdhci_s3c_suspend() 680 struct sdhci_host *host = dev_get_drvdata(dev); sdhci_s3c_resume() local 682 return sdhci_resume_host(host); sdhci_s3c_resume() 689 struct sdhci_host *host = dev_get_drvdata(dev); sdhci_s3c_runtime_suspend() local 690 struct sdhci_s3c *ourhost = to_s3c(host); sdhci_s3c_runtime_suspend() 694 ret = sdhci_runtime_suspend_host(host); sdhci_s3c_runtime_suspend() 704 struct sdhci_host *host = dev_get_drvdata(dev); sdhci_s3c_runtime_resume() local 705 struct sdhci_s3c *ourhost = to_s3c(host); sdhci_s3c_runtime_resume() 712 ret = sdhci_runtime_resume_host(host); sdhci_s3c_runtime_resume() 392 sdhci_s3c_parse_dt(struct device *dev, struct sdhci_host *host, struct s3c_sdhci_platdata *pdata) sdhci_s3c_parse_dt() argument 422 sdhci_s3c_parse_dt(struct device *dev, struct sdhci_host *host, struct s3c_sdhci_platdata *pdata) sdhci_s3c_parse_dt() argument
|
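The body of sdhci_s3c_set_bus_width() is mostly elided above; a typical implementation of this callback flips the standard 4-bit/8-bit bits in SDHCI_HOST_CONTROL, roughly as sketched below:

static void example_set_bus_width(struct sdhci_host *host, int width)
{
        u8 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

        switch (width) {
        case MMC_BUS_WIDTH_8:
                ctrl |= SDHCI_CTRL_8BITBUS;
                ctrl &= ~SDHCI_CTRL_4BITBUS;
                break;
        case MMC_BUS_WIDTH_4:
                ctrl |= SDHCI_CTRL_4BITBUS;
                ctrl &= ~SDHCI_CTRL_8BITBUS;
                break;
        default:
                ctrl &= ~(SDHCI_CTRL_8BITBUS | SDHCI_CTRL_4BITBUS);
                break;
        }

        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}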
H A D | sdhci-esdhc-imx.c | 21 #include <linux/mmc/host.h> 211 static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) esdhc_clrset_le() argument 213 void __iomem *base = host->ioaddr + (reg & ~0x3); esdhc_clrset_le() 219 static u32 esdhc_readl_le(struct sdhci_host *host, int reg) esdhc_readl_le() argument 221 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); esdhc_readl_le() 223 u32 val = readl(host->ioaddr + reg); esdhc_readl_le() 256 val = readl(host->ioaddr + SDHCI_CAPABILITIES) & 0xFFFF; esdhc_readl_le() 285 writel(SDHCI_INT_RESPONSE, host->ioaddr + esdhc_readl_le() 294 static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) esdhc_writel_le() argument 296 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); esdhc_writel_le() 310 data = readl(host->ioaddr + SDHCI_HOST_CONTROL); esdhc_writel_le() 312 writel(data, host->ioaddr + SDHCI_HOST_CONTROL); esdhc_writel_le() 314 writel(data, host->ioaddr + SDHCI_HOST_CONTROL); esdhc_writel_le() 322 v = readl(host->ioaddr + ESDHC_VENDOR_SPEC); esdhc_writel_le() 324 writel(v, host->ioaddr + ESDHC_VENDOR_SPEC); esdhc_writel_le() 331 writel(data, host->ioaddr + SDHCI_TRANSFER_MODE); esdhc_writel_le() 343 writel(val, host->ioaddr + reg); esdhc_writel_le() 346 static u16 esdhc_readw_le(struct sdhci_host *host, int reg) esdhc_readw_le() argument 348 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); esdhc_readw_le() 357 * The usdhc register returns a wrong host version. esdhc_readw_le() 365 val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); esdhc_readw_le() 371 val = readl(host->ioaddr + ESDHC_MIX_CTRL); esdhc_readw_le() 374 val = readl(host->ioaddr + SDHCI_ACMD12_ERR); esdhc_readw_le() 389 u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); esdhc_readw_le() 397 ret = readw(host->ioaddr + SDHCI_TRANSFER_MODE); esdhc_readw_le() 403 return readw(host->ioaddr + reg); esdhc_readw_le() 406 static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) esdhc_writew_le() argument 408 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); esdhc_writew_le() 414 new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); esdhc_writew_le() 419 writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); esdhc_writew_le() 422 new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); esdhc_writew_le() 427 writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); esdhc_writew_le() 429 new_val = readl(host->ioaddr + ESDHC_MIX_CTRL); esdhc_writew_le() 434 writel(new_val , host->ioaddr + ESDHC_MIX_CTRL); esdhc_writew_le() 436 u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR); esdhc_writew_le() 437 u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); esdhc_writew_le() 452 writel(v, host->ioaddr + SDHCI_ACMD12_ERR); esdhc_writew_le() 453 writel(m, host->ioaddr + ESDHC_MIX_CTRL); esdhc_writew_le() 458 && (host->cmd->opcode == SD_IO_RW_EXTENDED) esdhc_writew_le() 459 && (host->cmd->data->blocks > 1) esdhc_writew_le() 460 && (host->cmd->data->flags & MMC_DATA_READ)) { esdhc_writew_le() 462 v = readl(host->ioaddr + ESDHC_VENDOR_SPEC); esdhc_writew_le() 464 writel(v, host->ioaddr + ESDHC_VENDOR_SPEC); esdhc_writew_le() 468 u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); esdhc_writew_le() 475 writel(m, host->ioaddr + ESDHC_MIX_CTRL); esdhc_writew_le() 485 if (host->cmd->opcode == MMC_STOP_TRANSMISSION) esdhc_writew_le() 488 if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) && esdhc_writew_le() 494 host->ioaddr + SDHCI_TRANSFER_MODE); esdhc_writew_le() 497 host->ioaddr + SDHCI_TRANSFER_MODE); esdhc_writew_le() 503 esdhc_clrset_le(host, 0xffff, val, reg); 
esdhc_writew_le() 506 static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) esdhc_writeb_le() argument 508 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); esdhc_writeb_le() 539 esdhc_clrset_le(host, mask, new_val, reg); esdhc_writeb_le() 542 esdhc_clrset_le(host, 0xff, val, reg); esdhc_writeb_le() 553 esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL); esdhc_writeb_le() 560 new_val = readl(host->ioaddr + ESDHC_MIX_CTRL); esdhc_writeb_le() 562 host->ioaddr + ESDHC_MIX_CTRL); esdhc_writeb_le() 568 static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) esdhc_pltfm_get_max_clock() argument 570 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); esdhc_pltfm_get_max_clock() 580 static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) esdhc_pltfm_get_min_clock() argument 582 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); esdhc_pltfm_get_min_clock() 587 static inline void esdhc_pltfm_set_clock(struct sdhci_host *host, esdhc_pltfm_set_clock() argument 590 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); esdhc_pltfm_set_clock() 598 host->mmc->actual_clock = 0; esdhc_pltfm_set_clock() 601 val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); esdhc_pltfm_set_clock() 603 host->ioaddr + ESDHC_VENDOR_SPEC); esdhc_pltfm_set_clock() 611 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); esdhc_pltfm_set_clock() 614 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); esdhc_pltfm_set_clock() 622 host->mmc->actual_clock = host_clock / pre_div / div; esdhc_pltfm_set_clock() 623 dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", esdhc_pltfm_set_clock() 624 clock, host->mmc->actual_clock); esdhc_pltfm_set_clock() 632 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); esdhc_pltfm_set_clock() 636 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); esdhc_pltfm_set_clock() 639 val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); esdhc_pltfm_set_clock() 641 host->ioaddr + ESDHC_VENDOR_SPEC); esdhc_pltfm_set_clock() 647 static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) esdhc_pltfm_get_ro() argument 649 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); esdhc_pltfm_get_ro() 655 return mmc_gpio_get_ro(host->mmc); esdhc_pltfm_get_ro() 657 return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) & esdhc_pltfm_get_ro() 666 static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) esdhc_pltfm_set_bus_width() argument 682 esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl, esdhc_pltfm_set_bus_width() 686 static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val) esdhc_prepare_tuning() argument 693 reg = readl(host->ioaddr + ESDHC_MIX_CTRL); esdhc_prepare_tuning() 696 writel(reg, host->ioaddr + ESDHC_MIX_CTRL); esdhc_prepare_tuning() 697 writel(val << 8, host->ioaddr + ESDHC_TUNE_CTRL_STATUS); esdhc_prepare_tuning() 698 dev_dbg(mmc_dev(host->mmc), esdhc_prepare_tuning() 700 val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS)); esdhc_prepare_tuning() 703 static void esdhc_post_tuning(struct sdhci_host *host) esdhc_post_tuning() argument 707 reg = readl(host->ioaddr + ESDHC_MIX_CTRL); esdhc_post_tuning() 709 writel(reg, host->ioaddr + ESDHC_MIX_CTRL); esdhc_post_tuning() 712 static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode) esdhc_executing_tuning() argument 719 esdhc_prepare_tuning(host, min); esdhc_executing_tuning() 720 if (!mmc_send_tuning(host->mmc)) esdhc_executing_tuning() 728 esdhc_prepare_tuning(host, max); esdhc_executing_tuning() 729 if (mmc_send_tuning(host->mmc)) { esdhc_executing_tuning() 738 
esdhc_prepare_tuning(host, avg); esdhc_executing_tuning() 739 ret = mmc_send_tuning(host->mmc); esdhc_executing_tuning() 740 esdhc_post_tuning(host); esdhc_executing_tuning() 742 dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n", esdhc_executing_tuning() 748 static int esdhc_change_pinstate(struct sdhci_host *host, esdhc_change_pinstate() argument 751 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); esdhc_change_pinstate() 755 dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs); esdhc_change_pinstate() 779 static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing) esdhc_set_uhs_signaling() argument 781 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); esdhc_set_uhs_signaling() 794 writel(readl(host->ioaddr + ESDHC_MIX_CTRL) | esdhc_set_uhs_signaling() 796 host->ioaddr + ESDHC_MIX_CTRL); esdhc_set_uhs_signaling() 805 writel(v, host->ioaddr + ESDHC_DLL_CTRL); esdhc_set_uhs_signaling() 810 esdhc_change_pinstate(host, timing); esdhc_set_uhs_signaling() 813 static void esdhc_reset(struct sdhci_host *host, u8 mask) esdhc_reset() argument 815 sdhci_reset(host, mask); esdhc_reset() 817 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); esdhc_reset() 818 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); esdhc_reset() 821 static unsigned int esdhc_get_max_timeout_count(struct sdhci_host *host) esdhc_get_max_timeout_count() argument 823 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); esdhc_get_max_timeout_count() 829 static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) esdhc_set_timeout() argument 831 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); esdhc_set_timeout() 835 sdhci_writeb(host, esdhc_is_usdhc(imx_data) ? 0xF : 0xE, esdhc_set_timeout() 867 struct sdhci_host *host, sdhci_esdhc_imx_probe_dt() 905 mmc_of_parse_voltage(np, &host->ocr_mask); sdhci_esdhc_imx_probe_dt() 908 ret = mmc_of_parse(host->mmc); sdhci_esdhc_imx_probe_dt() 912 if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) sdhci_esdhc_imx_probe_dt() 913 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; sdhci_esdhc_imx_probe_dt() 920 struct sdhci_host *host, sdhci_esdhc_imx_probe_dt() 932 struct sdhci_host *host; sdhci_esdhc_imx_probe() local 938 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0); sdhci_esdhc_imx_probe() 939 if (IS_ERR(host)) sdhci_esdhc_imx_probe() 940 return PTR_ERR(host); sdhci_esdhc_imx_probe() 942 pltfm_host = sdhci_priv(host); sdhci_esdhc_imx_probe() 987 dev_warn(mmc_dev(host->mmc), "could not get default state\n"); sdhci_esdhc_imx_probe() 989 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; sdhci_esdhc_imx_probe() 993 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK sdhci_esdhc_imx_probe() 1001 writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL); sdhci_esdhc_imx_probe() 1002 host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; sdhci_esdhc_imx_probe() 1003 host->mmc->caps |= MMC_CAP_1_8V_DDR; sdhci_esdhc_imx_probe() 1011 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) | sdhci_esdhc_imx_probe() 1013 host->ioaddr + ESDHC_TUNING_CTRL); sdhci_esdhc_imx_probe() 1016 if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) { sdhci_esdhc_imx_probe() 1017 if (!host->mmc->parent->platform_data) { sdhci_esdhc_imx_probe() 1018 dev_err(mmc_dev(host->mmc), "no board data!\n"); sdhci_esdhc_imx_probe() 1023 host->mmc->parent->platform_data); sdhci_esdhc_imx_probe() 1028 err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio); sdhci_esdhc_imx_probe() 1030 dev_err(mmc_dev(host->mmc), sdhci_esdhc_imx_probe() 1034 host->mmc->caps2 |= 
MMC_CAP2_RO_ACTIVE_HIGH; sdhci_esdhc_imx_probe() 1042 err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0); sdhci_esdhc_imx_probe() 1044 dev_err(mmc_dev(host->mmc), sdhci_esdhc_imx_probe() 1052 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; sdhci_esdhc_imx_probe() 1056 host->mmc->caps |= MMC_CAP_NONREMOVABLE; sdhci_esdhc_imx_probe() 1065 host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; sdhci_esdhc_imx_probe() 1068 host->mmc->caps |= MMC_CAP_4_BIT_DATA; sdhci_esdhc_imx_probe() 1072 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; sdhci_esdhc_imx_probe() 1085 dev_warn(mmc_dev(host->mmc), sdhci_esdhc_imx_probe() 1088 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; sdhci_esdhc_imx_probe() 1091 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; sdhci_esdhc_imx_probe() 1094 err = sdhci_add_host(host); sdhci_esdhc_imx_probe() 1117 struct sdhci_host *host = platform_get_drvdata(pdev); sdhci_esdhc_imx_remove() local 1118 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_esdhc_imx_remove() 1120 int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); sdhci_esdhc_imx_remove() 1126 sdhci_remove_host(host, dead); sdhci_esdhc_imx_remove() 1140 struct sdhci_host *host = dev_get_drvdata(dev); sdhci_esdhc_runtime_suspend() local 1141 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_esdhc_runtime_suspend() 1145 ret = sdhci_runtime_suspend_host(host); sdhci_esdhc_runtime_suspend() 1147 if (!sdhci_sdio_irq_enabled(host)) { sdhci_esdhc_runtime_suspend() 1158 struct sdhci_host *host = dev_get_drvdata(dev); sdhci_esdhc_runtime_resume() local 1159 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_esdhc_runtime_resume() 1162 if (!sdhci_sdio_irq_enabled(host)) { sdhci_esdhc_runtime_resume() 1168 return sdhci_runtime_resume_host(host); sdhci_esdhc_runtime_resume() 866 sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, struct sdhci_host *host, struct esdhc_platform_data *boarddata) sdhci_esdhc_imx_probe_dt() argument 919 sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, struct sdhci_host *host, struct esdhc_platform_data *boarddata) sdhci_esdhc_imx_probe_dt() argument
|
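Note on the entry above: esdhc_executing_tuning() sweeps the delay taps for the first value that passes mmc_send_tuning(), keeps sweeping for the last passing value, then programs the midpoint and verifies it once more. Below is a minimal user-space sketch of that window scan, assuming the general min/max/midpoint shape visible in the excerpt; tap_ok() and the 0..127 tap range are stand-ins for the real prepare-tuning/send-tuning calls and the SoC-specific limit, not the driver's API.

    /* Sketch of the tuning-window scan: find the first passing delay tap,
     * then the last, and settle on the midpoint.  tap_ok() stands in for
     * programming ESDHC_TUNE_CTRL_STATUS and issuing mmc_send_tuning().
     */
    #include <stdio.h>
    #include <stdbool.h>

    #define TAP_MIN 0
    #define TAP_MAX 127                     /* assumed range for the demo */

    static bool tap_ok(int tap)
    {
        return tap >= 40 && tap <= 90;      /* fake pass window */
    }

    static int find_tuning_tap(void)
    {
        int min = TAP_MIN, max;

        while (min < TAP_MAX && !tap_ok(min))   /* first passing tap */
            min++;
        if (min >= TAP_MAX)
            return -1;                          /* no usable window  */

        max = min + 1;
        while (max < TAP_MAX && tap_ok(max))    /* last passing tap  */
            max++;

        return (min + max) / 2;                 /* centre of window  */
    }

    int main(void)
    {
        printf("selected tap: %d\n", find_tuning_tap());
        return 0;
    }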
H A D | via-sdmmc.c | 2 * drivers/mmc/host/via-sdmmc.c - VIA SD/MMC Card Reader driver 17 #include <linux/mmc/host.h> 67 * INTEN : Enable SD host interrupt. 168 * CFE : Enable SD host automatic Clock FReezing 333 static void via_print_sdchc(struct via_crdr_mmc_host *host) via_print_sdchc() argument 335 void __iomem *addrbase = host->sdhc_mmiobase; via_print_sdchc() 352 static void via_print_pcictrl(struct via_crdr_mmc_host *host) via_print_pcictrl() argument 354 void __iomem *addrbase = host->pcictrl_mmiobase; via_print_pcictrl() 366 static void via_save_pcictrlreg(struct via_crdr_mmc_host *host) via_save_pcictrlreg() argument 371 pm_pcictrl_reg = &(host->pm_pcictrl_reg); via_save_pcictrlreg() 372 addrbase = host->pcictrl_mmiobase; via_save_pcictrlreg() 385 static void via_restore_pcictrlreg(struct via_crdr_mmc_host *host) via_restore_pcictrlreg() argument 390 pm_pcictrl_reg = &(host->pm_pcictrl_reg); via_restore_pcictrlreg() 391 addrbase = host->pcictrl_mmiobase; via_restore_pcictrlreg() 402 static void via_save_sdcreg(struct via_crdr_mmc_host *host) via_save_sdcreg() argument 407 pm_sdhc_reg = &(host->pm_sdhc_reg); via_save_sdcreg() 408 addrbase = host->sdhc_mmiobase; via_save_sdcreg() 422 static void via_restore_sdcreg(struct via_crdr_mmc_host *host) via_restore_sdcreg() argument 427 pm_sdhc_reg = &(host->pm_sdhc_reg); via_restore_sdcreg() 428 addrbase = host->sdhc_mmiobase; via_restore_sdcreg() 450 static void via_set_ddma(struct via_crdr_mmc_host *host, via_set_ddma() argument 462 addrbase = host->ddma_mmiobase; via_set_ddma() 471 addrbase = host->pcictrl_mmiobase; via_set_ddma() 473 dev_info(host->mmc->parent, "forcing card speed to 8MHz\n"); via_set_ddma() 478 static void via_sdc_preparedata(struct via_crdr_mmc_host *host, via_sdc_preparedata() argument 485 WARN_ON(host->data); via_sdc_preparedata() 488 BUG_ON(data->blksz > host->mmc->max_blk_size); via_sdc_preparedata() 489 BUG_ON(data->blocks > host->mmc->max_blk_count); via_sdc_preparedata() 491 host->data = data; via_sdc_preparedata() 493 count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, via_sdc_preparedata() 498 via_set_ddma(host, sg_dma_address(data->sg), sg_dma_len(data->sg), via_sdc_preparedata() 501 addrbase = host->sdhc_mmiobase; via_sdc_preparedata() 510 static void via_sdc_get_response(struct via_crdr_mmc_host *host, via_sdc_get_response() argument 513 void __iomem *addrbase = host->sdhc_mmiobase; via_sdc_get_response() 552 static void via_sdc_send_command(struct via_crdr_mmc_host *host, via_sdc_send_command() argument 559 WARN_ON(host->cmd); via_sdc_send_command() 562 mod_timer(&host->timer, jiffies + HZ); via_sdc_send_command() 563 host->cmd = cmd; via_sdc_send_command() 586 pr_err("%s: cmd->flag is not valid\n", mmc_hostname(host->mmc)); via_sdc_send_command() 593 via_sdc_preparedata(host, data); via_sdc_send_command() 613 if (cmd == host->mrq->stop) via_sdc_send_command() 618 addrbase = host->sdhc_mmiobase; via_sdc_send_command() 623 static void via_sdc_finish_data(struct via_crdr_mmc_host *host) via_sdc_finish_data() argument 627 BUG_ON(!host->data); via_sdc_finish_data() 629 data = host->data; via_sdc_finish_data() 630 host->data = NULL; via_sdc_finish_data() 637 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, via_sdc_finish_data() 642 via_sdc_send_command(host, data->stop); via_sdc_finish_data() 644 tasklet_schedule(&host->finish_tasklet); via_sdc_finish_data() 647 static void via_sdc_finish_command(struct via_crdr_mmc_host *host) via_sdc_finish_command() argument 649 
via_sdc_get_response(host, host->cmd); via_sdc_finish_command() 651 host->cmd->error = 0; via_sdc_finish_command() 653 if (!host->cmd->data) via_sdc_finish_command() 654 tasklet_schedule(&host->finish_tasklet); via_sdc_finish_command() 656 host->cmd = NULL; via_sdc_finish_command() 662 struct via_crdr_mmc_host *host; via_sdc_request() local 666 host = mmc_priv(mmc); via_sdc_request() 668 spin_lock_irqsave(&host->lock, flags); via_sdc_request() 670 addrbase = host->pcictrl_mmiobase; via_sdc_request() 673 status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS); via_sdc_request() 675 writew(status, host->sdhc_mmiobase + VIA_CRDR_SDSTATUS); via_sdc_request() 677 WARN_ON(host->mrq != NULL); via_sdc_request() 678 host->mrq = mrq; via_sdc_request() 680 status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS); via_sdc_request() 681 if (!(status & VIA_CRDR_SDSTS_SLOTG) || host->reject) { via_sdc_request() 682 host->mrq->cmd->error = -ENOMEDIUM; via_sdc_request() 683 tasklet_schedule(&host->finish_tasklet); via_sdc_request() 685 via_sdc_send_command(host, mrq->cmd); via_sdc_request() 689 spin_unlock_irqrestore(&host->lock, flags); via_sdc_request() 692 static void via_sdc_set_power(struct via_crdr_mmc_host *host, via_sdc_set_power() argument 698 spin_lock_irqsave(&host->lock, flags); via_sdc_set_power() 700 host->power = (1 << power); via_sdc_set_power() 702 gatt = readb(host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT); via_sdc_set_power() 703 if (host->power == MMC_VDD_165_195) via_sdc_set_power() 711 writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT); via_sdc_set_power() 714 spin_unlock_irqrestore(&host->lock, flags); via_sdc_set_power() 716 via_pwron_sleep(host); via_sdc_set_power() 721 struct via_crdr_mmc_host *host; via_sdc_set_ios() local 727 host = mmc_priv(mmc); via_sdc_set_ios() 729 spin_lock_irqsave(&host->lock, flags); via_sdc_set_ios() 731 addrbase = host->sdhc_mmiobase; via_sdc_set_ios() 768 addrbase = host->pcictrl_mmiobase; via_sdc_set_ios() 773 spin_unlock_irqrestore(&host->lock, flags); via_sdc_set_ios() 776 via_sdc_set_power(host, ios->vdd, 1); via_sdc_set_ios() 778 via_sdc_set_power(host, ios->vdd, 0); via_sdc_set_ios() 783 struct via_crdr_mmc_host *host; via_sdc_get_ro() local 787 host = mmc_priv(mmc); via_sdc_get_ro() 789 spin_lock_irqsave(&host->lock, flags); via_sdc_get_ro() 791 status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS); via_sdc_get_ro() 793 spin_unlock_irqrestore(&host->lock, flags); via_sdc_get_ro() 804 static void via_reset_pcictrl(struct via_crdr_mmc_host *host) via_reset_pcictrl() argument 809 spin_lock_irqsave(&host->lock, flags); via_reset_pcictrl() 811 via_save_pcictrlreg(host); via_reset_pcictrl() 812 via_save_sdcreg(host); via_reset_pcictrl() 814 spin_unlock_irqrestore(&host->lock, flags); via_reset_pcictrl() 817 if (host->power == MMC_VDD_165_195) via_reset_pcictrl() 821 writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT); via_reset_pcictrl() 822 via_pwron_sleep(host); via_reset_pcictrl() 824 writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT); via_reset_pcictrl() 827 spin_lock_irqsave(&host->lock, flags); via_reset_pcictrl() 829 via_restore_pcictrlreg(host); via_reset_pcictrl() 830 via_restore_sdcreg(host); via_reset_pcictrl() 833 spin_unlock_irqrestore(&host->lock, flags); via_reset_pcictrl() 836 static void via_sdc_cmd_isr(struct via_crdr_mmc_host *host, u16 intmask) via_sdc_cmd_isr() argument 840 if (!host->cmd) { via_sdc_cmd_isr() 843 mmc_hostname(host->mmc), intmask); via_sdc_cmd_isr() 848 host->cmd->error = -ETIMEDOUT; 
via_sdc_cmd_isr() 850 host->cmd->error = -EILSEQ; via_sdc_cmd_isr() 852 if (host->cmd->error) via_sdc_cmd_isr() 853 tasklet_schedule(&host->finish_tasklet); via_sdc_cmd_isr() 855 via_sdc_finish_command(host); via_sdc_cmd_isr() 858 static void via_sdc_data_isr(struct via_crdr_mmc_host *host, u16 intmask) via_sdc_data_isr() argument 863 host->data->error = -ETIMEDOUT; via_sdc_data_isr() 865 host->data->error = -EILSEQ; via_sdc_data_isr() 867 via_sdc_finish_data(host); via_sdc_data_isr() 968 struct via_crdr_mmc_host *host; via_sdc_tasklet_finish() local 972 host = (struct via_crdr_mmc_host *)param; via_sdc_tasklet_finish() 974 spin_lock_irqsave(&host->lock, flags); via_sdc_tasklet_finish() 976 del_timer(&host->timer); via_sdc_tasklet_finish() 977 mrq = host->mrq; via_sdc_tasklet_finish() 978 host->mrq = NULL; via_sdc_tasklet_finish() 979 host->cmd = NULL; via_sdc_tasklet_finish() 980 host->data = NULL; via_sdc_tasklet_finish() 982 spin_unlock_irqrestore(&host->lock, flags); via_sdc_tasklet_finish() 984 mmc_request_done(host->mmc, mrq); via_sdc_tasklet_finish() 989 struct via_crdr_mmc_host *host; via_sdc_card_detect() local 994 host = container_of(work, struct via_crdr_mmc_host, carddet_work); via_sdc_card_detect() 996 addrbase = host->ddma_mmiobase; via_sdc_card_detect() 999 spin_lock_irqsave(&host->lock, flags); via_sdc_card_detect() 1001 addrbase = host->pcictrl_mmiobase; via_sdc_card_detect() 1004 addrbase = host->sdhc_mmiobase; via_sdc_card_detect() 1007 if (host->mrq) { via_sdc_card_detect() 1009 mmc_hostname(host->mmc)); via_sdc_card_detect() 1010 host->mrq->cmd->error = -ENOMEDIUM; via_sdc_card_detect() 1011 tasklet_schedule(&host->finish_tasklet); via_sdc_card_detect() 1015 spin_unlock_irqrestore(&host->lock, flags); via_sdc_card_detect() 1017 via_reset_pcictrl(host); via_sdc_card_detect() 1019 spin_lock_irqsave(&host->lock, flags); via_sdc_card_detect() 1023 spin_unlock_irqrestore(&host->lock, flags); via_sdc_card_detect() 1025 via_print_pcictrl(host); via_sdc_card_detect() 1026 via_print_sdchc(host); via_sdc_card_detect() 1028 mmc_detect_change(host->mmc, msecs_to_jiffies(500)); via_sdc_card_detect() 1031 static void via_init_mmc_host(struct via_crdr_mmc_host *host) via_init_mmc_host() argument 1033 struct mmc_host *mmc = host->mmc; via_init_mmc_host() 1038 init_timer(&host->timer); via_init_mmc_host() 1039 host->timer.data = (unsigned long)host; via_init_mmc_host() 1040 host->timer.function = via_sdc_timeout; via_init_mmc_host() 1042 spin_lock_init(&host->lock); via_init_mmc_host() 1059 INIT_WORK(&host->carddet_work, via_sdc_card_detect); via_init_mmc_host() 1061 tasklet_init(&host->finish_tasklet, via_sdc_tasklet_finish, via_init_mmc_host() 1062 (unsigned long)host); via_init_mmc_host() 1064 addrbase = host->sdhc_mmiobase; via_init_mmc_host() 1235 static void via_init_sdc_pm(struct via_crdr_mmc_host *host) via_init_sdc_pm() argument 1242 pm_sdhcreg = &(host->pm_sdhc_reg); via_init_sdc_pm() 1243 addrbase = host->sdhc_mmiobase; via_init_sdc_pm() 1265 via_print_pcictrl(host); via_init_sdc_pm() 1266 via_print_sdchc(host); via_init_sdc_pm() 1271 struct via_crdr_mmc_host *host; via_sd_suspend() local 1273 host = pci_get_drvdata(pcidev); via_sd_suspend() 1275 via_save_pcictrlreg(host); via_sd_suspend() 1276 via_save_sdcreg(host); via_sd_suspend()
|
H A D | sh_mobile_sdhi.c | 28 #include <linux/mmc/host.h> 38 #define host_to_priv(host) container_of((host)->pdata, struct sh_mobile_sdhi, mmc_data) 91 static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width) sh_mobile_sdhi_sdbuf_width() argument 99 switch (sd_ctrl_read16(host, CTL_VERSION)) { sh_mobile_sdhi_sdbuf_width() 111 sd_ctrl_write16(host, EXT_ACC, val); sh_mobile_sdhi_sdbuf_width() 117 struct tmio_mmc_host *host = mmc_priv(mmc); sh_mobile_sdhi_clk_enable() local 118 struct sh_mobile_sdhi *priv = host_to_priv(host); sh_mobile_sdhi_clk_enable() 126 sh_mobile_sdhi_sdbuf_width(host, 16); sh_mobile_sdhi_clk_enable() 134 struct tmio_mmc_host *host = mmc_priv(mmc); sh_mobile_sdhi_clk_disable() local 135 struct sh_mobile_sdhi *priv = host_to_priv(host); sh_mobile_sdhi_clk_disable() 139 static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host) sh_mobile_sdhi_wait_idle() argument 143 while (--timeout && !(sd_ctrl_read16(host, CTL_STATUS2) & (1 << 13))) sh_mobile_sdhi_wait_idle() 147 dev_warn(&host->pdev->dev, "timeout waiting for SD bus idle\n"); sh_mobile_sdhi_wait_idle() 154 static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr) sh_mobile_sdhi_write16_hook() argument 166 return sh_mobile_sdhi_wait_idle(host); sh_mobile_sdhi_write16_hook() 190 static void sh_mobile_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable) sh_mobile_sdhi_enable_dma() argument 192 sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0); sh_mobile_sdhi_enable_dma() 195 sh_mobile_sdhi_sdbuf_width(host, enable ? 32 : 16); sh_mobile_sdhi_enable_dma() 205 struct tmio_mmc_host *host; sh_mobile_sdhi_probe() local 231 host = tmio_mmc_host_alloc(pdev); sh_mobile_sdhi_probe() 232 if (!host) { sh_mobile_sdhi_probe() 237 host->dma = dma_priv; sh_mobile_sdhi_probe() 238 host->write16_hook = sh_mobile_sdhi_write16_hook; sh_mobile_sdhi_probe() 239 host->clk_enable = sh_mobile_sdhi_clk_enable; sh_mobile_sdhi_probe() 240 host->clk_disable = sh_mobile_sdhi_clk_disable; sh_mobile_sdhi_probe() 241 host->multi_io_quirk = sh_mobile_sdhi_multi_io_quirk; sh_mobile_sdhi_probe() 244 host->bus_shift = 1; sh_mobile_sdhi_probe() 246 host->bus_shift = 0; sh_mobile_sdhi_probe() 287 ret = tmio_mmc_host_probe(host, mmc_data); sh_mobile_sdhi_probe() 300 dev_name(&pdev->dev), host); sh_mobile_sdhi_probe() 309 dev_name(&pdev->dev), host); sh_mobile_sdhi_probe() 318 dev_name(&pdev->dev), host); sh_mobile_sdhi_probe() 335 dev_name(&pdev->dev), host); sh_mobile_sdhi_probe() 348 mmc_hostname(host->mmc), (unsigned long) sh_mobile_sdhi_probe() 350 host->mmc->f_max / 1000000); sh_mobile_sdhi_probe() 355 tmio_mmc_host_remove(host); sh_mobile_sdhi_probe() 357 tmio_mmc_host_free(host); sh_mobile_sdhi_probe() 365 struct tmio_mmc_host *host = mmc_priv(mmc); sh_mobile_sdhi_remove() local 367 tmio_mmc_host_remove(host); sh_mobile_sdhi_remove()
|
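Note on the entry above: sh_mobile_sdhi_wait_idle() is a bounded busy-wait on bit 13 of CTL_STATUS2, warning when the budget runs out; the write16 hook relies on it before touching certain registers. A plain C sketch of that shape follows; read_status() and the 1000-iteration budget are stand-ins, since the excerpt elides the driver's actual initial count and return value.

    /* Bounded register poll: spin until the idle bit appears or the
     * iteration budget is exhausted, then report the timeout.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define BUS_IDLE_BIT (1u << 13)          /* CTL_STATUS2 idle flag */

    static uint16_t read_status(void)
    {
        static int countdown = 5;            /* pretend HW goes idle soon */
        return --countdown <= 0 ? BUS_IDLE_BIT : 0;
    }

    static int wait_idle(void)
    {
        int timeout = 1000;

        while (--timeout && !(read_status() & BUS_IDLE_BIT))
            ;                                /* the driver busy-waits here */

        if (!timeout) {
            fprintf(stderr, "timeout waiting for SD bus idle\n");
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        return wait_idle() ? 1 : 0;
    }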
H A D | sdhci-pxav3.c | 26 #include <linux/mmc/host.h> 131 struct sdhci_host *host) armada_38x_quirks() 134 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); armada_38x_quirks() 138 host->quirks |= SDHCI_QUIRK_MISSING_CAPS; armada_38x_quirks() 140 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); armada_38x_quirks() 141 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); armada_38x_quirks() 156 host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50); armada_38x_quirks() 167 host->caps &= ~SDHCI_CAN_VDD_180; armada_38x_quirks() 168 host->mmc->caps &= ~MMC_CAP_1_8V_DDR; armada_38x_quirks() 170 host->caps &= ~SDHCI_CAN_VDD_330; armada_38x_quirks() 172 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_USE_SDR50_TUNING); armada_38x_quirks() 177 static void pxav3_reset(struct sdhci_host *host, u8 mask) pxav3_reset() argument 179 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); pxav3_reset() 182 sdhci_reset(host, mask); pxav3_reset() 192 tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); pxav3_reset() 196 writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); pxav3_reset() 202 static void pxav3_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode) pxav3_gen_init_74_clocks() argument 204 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); pxav3_gen_init_74_clocks() 212 dev_dbg(mmc_dev(host->mmc), pxav3_gen_init_74_clocks() 220 tmp = readw(host->ioaddr + SD_CE_ATA_2); pxav3_gen_init_74_clocks() 222 writew(tmp, host->ioaddr + SD_CE_ATA_2); pxav3_gen_init_74_clocks() 225 tmp = readw(host->ioaddr + SD_CFG_FIFO_PARAM); pxav3_gen_init_74_clocks() 227 writew(tmp, host->ioaddr + SD_CFG_FIFO_PARAM); pxav3_gen_init_74_clocks() 234 if ((readw(host->ioaddr + SD_CE_ATA_2) pxav3_gen_init_74_clocks() 241 dev_warn(mmc_dev(host->mmc), "74 clock interrupt not cleared\n"); pxav3_gen_init_74_clocks() 244 tmp = readw(host->ioaddr + SD_CE_ATA_2); pxav3_gen_init_74_clocks() 246 writew(tmp, host->ioaddr + SD_CE_ATA_2); pxav3_gen_init_74_clocks() 251 static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) pxav3_set_uhs_signaling() argument 253 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); pxav3_set_uhs_signaling() 261 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); pxav3_set_uhs_signaling() 263 /* Select Bus Speed Mode for host */ pxav3_set_uhs_signaling() 302 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); pxav3_set_uhs_signaling() 303 dev_dbg(mmc_dev(host->mmc), pxav3_set_uhs_signaling() 366 struct sdhci_host *host = NULL; sdhci_pxav3_probe() local 375 host = sdhci_pltfm_init(pdev, &sdhci_pxav3_pdata, 0); sdhci_pxav3_probe() 376 if (IS_ERR(host)) sdhci_pxav3_probe() 377 return PTR_ERR(host); sdhci_pxav3_probe() 379 pltfm_host = sdhci_priv(host); sdhci_pxav3_probe() 398 host->mmc->caps |= MMC_CAP_1_8V_DDR; sdhci_pxav3_probe() 401 ret = armada_38x_quirks(pdev, host); sdhci_pxav3_probe() 411 ret = mmc_of_parse(host->mmc); sdhci_pxav3_probe() 420 host->mmc->caps |= MMC_CAP_NONREMOVABLE; sdhci_pxav3_probe() 424 host->mmc->caps |= MMC_CAP_8_BIT_DATA; sdhci_pxav3_probe() 427 host->quirks |= pdata->quirks; sdhci_pxav3_probe() 429 host->quirks2 |= pdata->quirks2; sdhci_pxav3_probe() 431 host->mmc->caps |= pdata->host_caps; sdhci_pxav3_probe() 433 host->mmc->caps2 |= pdata->host_caps2; sdhci_pxav3_probe() 435 host->mmc->pm_caps |= pdata->pm_caps; sdhci_pxav3_probe() 438 ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio, sdhci_pxav3_probe() 441 dev_err(mmc_dev(host->mmc), sdhci_pxav3_probe() 455 ret = sdhci_add_host(host); sdhci_pxav3_probe() 
457 dev_err(&pdev->dev, "failed to add host\n"); sdhci_pxav3_probe() 461 platform_set_drvdata(pdev, host); sdhci_pxav3_probe() 463 if (host->mmc->pm_caps & MMC_PM_KEEP_POWER) { sdhci_pxav3_probe() 465 host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ; sdhci_pxav3_probe() 489 struct sdhci_host *host = platform_get_drvdata(pdev); sdhci_pxav3_remove() local 490 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_pxav3_remove() 497 sdhci_remove_host(host, 1); sdhci_pxav3_remove() 511 struct sdhci_host *host = dev_get_drvdata(dev); sdhci_pxav3_suspend() local 514 ret = sdhci_suspend_host(host); sdhci_pxav3_suspend() 524 struct sdhci_host *host = dev_get_drvdata(dev); sdhci_pxav3_resume() local 527 ret = sdhci_resume_host(host); sdhci_pxav3_resume() 538 struct sdhci_host *host = dev_get_drvdata(dev); sdhci_pxav3_runtime_suspend() local 539 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_pxav3_runtime_suspend() 543 ret = sdhci_runtime_suspend_host(host); sdhci_pxav3_runtime_suspend() 556 struct sdhci_host *host = dev_get_drvdata(dev); sdhci_pxav3_runtime_resume() local 557 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_pxav3_runtime_resume() 564 return sdhci_runtime_resume_host(host); sdhci_pxav3_runtime_resume() 130 armada_38x_quirks(struct platform_device *pdev, struct sdhci_host *host) armada_38x_quirks() argument
|
H A D | sdhci-sirf.c | 11 #include <linux/mmc/host.h> 26 static void sdhci_sirf_set_bus_width(struct sdhci_host *host, int width) sdhci_sirf_set_bus_width() argument 30 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); sdhci_sirf_set_bus_width() 34 * CSR atlas7 and prima2 SD host version is not 3.0 sdhci_sirf_set_bus_width() 43 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); sdhci_sirf_set_bus_width() 46 static int sdhci_sirf_execute_tuning(struct sdhci_host *host, u32 opcode) sdhci_sirf_execute_tuning() argument 54 struct mmc_host *mmc = host->mmc; sdhci_sirf_execute_tuning() 56 clock_setting = sdhci_readw(host, SDHCI_CLK_DELAY_SETTING); sdhci_sirf_execute_tuning() 62 sdhci_writel(host, sdhci_sirf_execute_tuning() 96 sdhci_writel(host, sdhci_sirf_execute_tuning() 134 struct sdhci_host *host; sdhci_sirf_probe() local 152 host = sdhci_pltfm_init(pdev, &sdhci_sirf_pdata, sizeof(struct sdhci_sirf_priv)); sdhci_sirf_probe() 153 if (IS_ERR(host)) sdhci_sirf_probe() 154 return PTR_ERR(host); sdhci_sirf_probe() 156 pltfm_host = sdhci_priv(host); sdhci_sirf_probe() 167 ret = sdhci_add_host(host); sdhci_sirf_probe() 176 ret = mmc_gpio_request_cd(host->mmc, priv->gpio_cd, 0); sdhci_sirf_probe() 182 mmc_gpiod_request_cd_irq(host->mmc); sdhci_sirf_probe() 188 sdhci_remove_host(host, 0); sdhci_sirf_probe() 199 struct sdhci_host *host = dev_get_drvdata(dev); sdhci_sirf_suspend() local 200 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_sirf_suspend() 203 ret = sdhci_suspend_host(host); sdhci_sirf_suspend() 214 struct sdhci_host *host = dev_get_drvdata(dev); sdhci_sirf_resume() local 215 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_sirf_resume() 224 return sdhci_resume_host(host); sdhci_sirf_resume()
|
H A D | dw_mmc-rockchip.c | 13 #include <linux/mmc/host.h> 22 static void dw_mci_rockchip_prepare_command(struct dw_mci *host, u32 *cmdr) dw_mci_rockchip_prepare_command() argument 27 static int dw_mci_rk3288_setup_clock(struct dw_mci *host) dw_mci_rk3288_setup_clock() argument 29 host->bus_hz /= RK3288_CLKGEN_DIV; dw_mci_rk3288_setup_clock() 34 static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios) dw_mci_rk3288_set_ios() argument 59 ret = clk_set_rate(host->ciu_clk, cclkin); dw_mci_rk3288_set_ios() 61 dev_warn(host->dev, "failed to set rate %uHz\n", ios->clock); dw_mci_rk3288_set_ios() 63 bus_hz = clk_get_rate(host->ciu_clk) / RK3288_CLKGEN_DIV; dw_mci_rk3288_set_ios() 64 if (bus_hz != host->bus_hz) { dw_mci_rk3288_set_ios() 65 host->bus_hz = bus_hz; dw_mci_rk3288_set_ios() 67 host->current_speed = 0; dw_mci_rk3288_set_ios() 71 static int dw_mci_rockchip_init(struct dw_mci *host) dw_mci_rockchip_init() argument 74 host->sdio_id0 = 8; dw_mci_rockchip_init() 125 struct dw_mci *host = dev_get_drvdata(dev); dw_mci_rockchip_suspend() local 127 return dw_mci_suspend(host); dw_mci_rockchip_suspend() 132 struct dw_mci *host = dev_get_drvdata(dev); dw_mci_rockchip_resume() local 134 return dw_mci_resume(host); dw_mci_rockchip_resume()
|
H A D | sdhci-pltfm.h | 40 static inline u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg) sdhci_be32bs_readl() argument 42 return in_be32(host->ioaddr + reg); sdhci_be32bs_readl() 45 static inline u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg) sdhci_be32bs_readw() argument 47 return in_be16(host->ioaddr + (reg ^ 0x2)); sdhci_be32bs_readw() 50 static inline u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg) sdhci_be32bs_readb() argument 52 return in_8(host->ioaddr + (reg ^ 0x3)); sdhci_be32bs_readb() 55 static inline void sdhci_be32bs_writel(struct sdhci_host *host, sdhci_be32bs_writel() argument 58 out_be32(host->ioaddr + reg, val); sdhci_be32bs_writel() 61 static inline void sdhci_be32bs_writew(struct sdhci_host *host, sdhci_be32bs_writew() argument 64 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_be32bs_writew() 77 sdhci_be32bs_writel(host, sdhci_be32bs_writew() 82 clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift); sdhci_be32bs_writew() 85 static inline void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg) sdhci_be32bs_writeb() argument 90 clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift); sdhci_be32bs_writeb() 106 extern unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host); 108 static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host) sdhci_pltfm_priv() argument 110 return (void *)host->private; sdhci_pltfm_priv()
|
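Note on the entry above: the sdhci_be32bs_* helpers fold 8- and 16-bit SDHCI registers into 32-bit big-endian bus accesses (the reg ^ 0x2 / reg ^ 0x3 address swizzle on reads, a masked word write on stores). The user-space model below reconstructs the write path; the base/shift computation is inferred from the clrsetbits_be32() calls because the excerpt compresses those lines, and the array-backed be32_read/be32_write helpers merely stand in for in_be32()/clrsetbits_be32().

    /* Model of the byte-lane maths: a sub-word register at LE byte offset
     * reg lives at shift (reg & 3) * 8 inside its containing 32-bit word,
     * so a byte store becomes read-word / mask lane / write-word.
     */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t regs[0x40 / 4];          /* fake 64-byte register window */

    static uint32_t be32_read(int base)            { return regs[base / 4]; }
    static void     be32_write(int base, uint32_t v) { regs[base / 4] = v;  }

    static void writeb_be32bs(int reg, uint8_t val)
    {
        int base  = reg & ~0x3;              /* containing 32-bit word     */
        int shift = (reg & 0x3) * 8;         /* byte lane inside that word */
        uint32_t v = be32_read(base);

        v = (v & ~(0xffu << shift)) | ((uint32_t)val << shift);
        be32_write(base, v);                 /* plays the clrsetbits_be32() role */
    }

    static uint8_t readb_be32bs(int reg)
    {
        return (be32_read(reg & ~0x3) >> ((reg & 0x3) * 8)) & 0xff;
    }

    int main(void)
    {
        writeb_be32bs(0x2e, 0x0e);           /* e.g. SDHCI_TIMEOUT_CONTROL */
        printf("reg 0x2e = 0x%02x, word 0x2c = 0x%08x\n",
               readb_be32bs(0x2e), regs[0x2c / 4]);
        return 0;
    }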
H A D | sdhci-st.c | 25 #include <linux/mmc/host.h> 144 * @host: sdhci host 145 * Description: this function is to configure the Arasan host controller. 148 * or eMMC4.3. This has to be done before registering the sdhci host. 150 static void st_mmcss_cconfig(struct device_node *np, struct sdhci_host *host) st_mmcss_cconfig() argument 152 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); st_mmcss_cconfig() 153 struct mmc_host *mhost = host->mmc; st_mmcss_cconfig() 165 host->ioaddr + ST_MMC_CCONFIG_REG_1); st_mmcss_cconfig() 184 writel_relaxed(cconf2, host->ioaddr + ST_MMC_CCONFIG_REG_2); st_mmcss_cconfig() 191 host->ioaddr + ST_MMC_GP_OUTPUT); st_mmcss_cconfig() 217 writel_relaxed(cconf3, host->ioaddr + ST_MMC_CCONFIG_REG_3); st_mmcss_cconfig() 218 writel_relaxed(cconf4, host->ioaddr + ST_MMC_CCONFIG_REG_4); st_mmcss_cconfig() 219 writel_relaxed(cconf5, host->ioaddr + ST_MMC_CCONFIG_REG_5); st_mmcss_cconfig() 250 static int sdhci_st_set_dll_for_clock(struct sdhci_host *host) sdhci_st_set_dll_for_clock() argument 253 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_st_set_dll_for_clock() 256 if (host->clock > CLK_TO_CHECK_DLL_LOCK) { sdhci_st_set_dll_for_clock() 258 ret = st_mmcss_lock_dll(host->ioaddr); sdhci_st_set_dll_for_clock() 264 static void sdhci_st_set_uhs_signaling(struct sdhci_host *host, sdhci_st_set_uhs_signaling() argument 267 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_st_set_uhs_signaling() 269 u16 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); sdhci_st_set_uhs_signaling() 272 /* Select Bus Speed Mode for host */ sdhci_st_set_uhs_signaling() 291 ret = sdhci_st_set_dll_for_clock(host); sdhci_st_set_uhs_signaling() 297 ret = sdhci_st_set_dll_for_clock(host); sdhci_st_set_uhs_signaling() 307 dev_warn(mmc_dev(host->mmc), "Error setting dll for clock " sdhci_st_set_uhs_signaling() 310 dev_dbg(mmc_dev(host->mmc), "uhs %d, ctrl_2 %04X\n", uhs, ctrl_2); sdhci_st_set_uhs_signaling() 312 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); sdhci_st_set_uhs_signaling() 315 static u32 sdhci_st_readl(struct sdhci_host *host, int reg) sdhci_st_readl() argument 321 ret = readl_relaxed(host->ioaddr + reg); sdhci_st_readl() 326 ret = readl_relaxed(host->ioaddr + reg); sdhci_st_readl() 353 struct sdhci_host *host; sdhci_st_probe() local 377 host = sdhci_pltfm_init(pdev, &sdhci_st_pdata, 0); sdhci_st_probe() 378 if (IS_ERR(host)) { sdhci_st_probe() 380 ret = PTR_ERR(host); sdhci_st_probe() 384 ret = mmc_of_parse(host->mmc); sdhci_st_probe() 401 pltfm_host = sdhci_priv(host); sdhci_st_probe() 406 st_mmcss_cconfig(np, host); sdhci_st_probe() 408 ret = sdhci_add_host(host); sdhci_st_probe() 414 platform_set_drvdata(pdev, host); sdhci_st_probe() 416 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION)); sdhci_st_probe() 438 struct sdhci_host *host = platform_get_drvdata(pdev); sdhci_st_remove() local 439 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_st_remove() 454 struct sdhci_host *host = dev_get_drvdata(dev); sdhci_st_suspend() local 455 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_st_suspend() 457 int ret = sdhci_suspend_host(host); sdhci_st_suspend() 472 struct sdhci_host *host = dev_get_drvdata(dev); sdhci_st_resume() local 473 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_st_resume() 482 st_mmcss_cconfig(np, host); sdhci_st_resume() 484 return sdhci_resume_host(host); sdhci_st_resume()
|
H A D | mxs-mmc.c | 38 #include <linux/mmc/host.h> 78 struct mxs_mmc_host *host = mmc_priv(mmc); mxs_mmc_get_cd() local 79 struct mxs_ssp *ssp = &host->ssp; mxs_mmc_get_cd() 82 if (host->broken_cd) mxs_mmc_get_cd() 99 static int mxs_mmc_reset(struct mxs_mmc_host *host) mxs_mmc_reset() argument 101 struct mxs_ssp *ssp = &host->ssp; mxs_mmc_reset() 125 if (host->sdio_irq_en) { mxs_mmc_reset() 135 static void mxs_mmc_start_cmd(struct mxs_mmc_host *host, 138 static void mxs_mmc_request_done(struct mxs_mmc_host *host) mxs_mmc_request_done() argument 140 struct mmc_command *cmd = host->cmd; mxs_mmc_request_done() 141 struct mmc_data *data = host->data; mxs_mmc_request_done() 142 struct mmc_request *mrq = host->mrq; mxs_mmc_request_done() 143 struct mxs_ssp *ssp = &host->ssp; mxs_mmc_request_done() 157 dma_unmap_sg(mmc_dev(host->mmc), data->sg, mxs_mmc_request_done() 168 host->data = NULL; mxs_mmc_request_done() 170 mxs_mmc_start_cmd(host, mrq->stop); mxs_mmc_request_done() 175 host->mrq = NULL; mxs_mmc_request_done() 176 mmc_request_done(host->mmc, mrq); mxs_mmc_request_done() 181 struct mxs_mmc_host *host = param; mxs_mmc_dma_irq_callback() local 183 mxs_mmc_request_done(host); mxs_mmc_dma_irq_callback() 188 struct mxs_mmc_host *host = dev_id; mxs_mmc_irq_handler() local 189 struct mmc_command *cmd = host->cmd; mxs_mmc_irq_handler() 190 struct mmc_data *data = host->data; mxs_mmc_irq_handler() 191 struct mxs_ssp *ssp = &host->ssp; mxs_mmc_irq_handler() 194 spin_lock(&host->lock); mxs_mmc_irq_handler() 200 spin_unlock(&host->lock); mxs_mmc_irq_handler() 203 mmc_signal_sdio_irq(host->mmc); mxs_mmc_irq_handler() 225 struct mxs_mmc_host *host, unsigned long flags) mxs_mmc_prep_dma() 227 struct mxs_ssp *ssp = &host->ssp; mxs_mmc_prep_dma() 229 struct mmc_data *data = host->data; mxs_mmc_prep_dma() 235 dma_map_sg(mmc_dev(host->mmc), data->sg, mxs_mmc_prep_dma() 249 desc->callback_param = host; mxs_mmc_prep_dma() 252 dma_unmap_sg(mmc_dev(host->mmc), data->sg, mxs_mmc_prep_dma() 259 static void mxs_mmc_bc(struct mxs_mmc_host *host) mxs_mmc_bc() argument 261 struct mxs_ssp *ssp = &host->ssp; mxs_mmc_bc() 262 struct mmc_command *cmd = host->cmd; mxs_mmc_bc() 270 if (host->sdio_irq_en) { mxs_mmc_bc() 280 desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK); mxs_mmc_bc() 289 dev_warn(mmc_dev(host->mmc), mxs_mmc_bc() 293 static void mxs_mmc_ac(struct mxs_mmc_host *host) mxs_mmc_ac() argument 295 struct mxs_ssp *ssp = &host->ssp; mxs_mmc_ac() 296 struct mmc_command *cmd = host->cmd; mxs_mmc_ac() 312 if (host->sdio_irq_en) { mxs_mmc_ac() 322 desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK); mxs_mmc_ac() 331 dev_warn(mmc_dev(host->mmc), mxs_mmc_ac() 351 static void mxs_mmc_adtc(struct mxs_mmc_host *host) mxs_mmc_adtc() argument 353 struct mmc_command *cmd = host->cmd; mxs_mmc_adtc() 365 struct mxs_ssp *ssp = &host->ssp; mxs_mmc_adtc() 387 ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) | mxs_mmc_adtc() 426 if (host->sdio_irq_en) { mxs_mmc_adtc() 444 desc = mxs_mmc_prep_dma(host, 0); mxs_mmc_adtc() 449 WARN_ON(host->data != NULL); mxs_mmc_adtc() 450 host->data = data; mxs_mmc_adtc() 453 desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); mxs_mmc_adtc() 461 dev_warn(mmc_dev(host->mmc), mxs_mmc_adtc() 465 static void mxs_mmc_start_cmd(struct mxs_mmc_host *host, mxs_mmc_start_cmd() argument 468 host->cmd = cmd; mxs_mmc_start_cmd() 472 mxs_mmc_bc(host); mxs_mmc_start_cmd() 475 mxs_mmc_ac(host); mxs_mmc_start_cmd() 478 mxs_mmc_ac(host); mxs_mmc_start_cmd() 481 mxs_mmc_adtc(host); mxs_mmc_start_cmd() 484 
dev_warn(mmc_dev(host->mmc), mxs_mmc_start_cmd() 492 struct mxs_mmc_host *host = mmc_priv(mmc); mxs_mmc_request() local 494 WARN_ON(host->mrq != NULL); mxs_mmc_request() 495 host->mrq = mrq; mxs_mmc_request() 496 mxs_mmc_start_cmd(host, mrq->cmd); mxs_mmc_request() 501 struct mxs_mmc_host *host = mmc_priv(mmc); mxs_mmc_set_ios() local 504 host->bus_width = 2; mxs_mmc_set_ios() 506 host->bus_width = 1; mxs_mmc_set_ios() 508 host->bus_width = 0; mxs_mmc_set_ios() 511 mxs_ssp_set_clk_rate(&host->ssp, ios->clock); mxs_mmc_set_ios() 516 struct mxs_mmc_host *host = mmc_priv(mmc); mxs_mmc_enable_sdio_irq() local 517 struct mxs_ssp *ssp = &host->ssp; mxs_mmc_enable_sdio_irq() 520 spin_lock_irqsave(&host->lock, flags); mxs_mmc_enable_sdio_irq() 522 host->sdio_irq_en = enable; mxs_mmc_enable_sdio_irq() 536 spin_unlock_irqrestore(&host->lock, flags); mxs_mmc_enable_sdio_irq() 540 mmc_signal_sdio_irq(host->mmc); mxs_mmc_enable_sdio_irq() 577 struct mxs_mmc_host *host; mxs_mmc_probe() local 592 host = mmc_priv(mmc); mxs_mmc_probe() 593 ssp = &host->ssp; mxs_mmc_probe() 604 host->mmc = mmc; mxs_mmc_probe() 605 host->sdio_irq_en = 0; mxs_mmc_probe() 626 ret = mxs_mmc_reset(host); mxs_mmc_probe() 634 dev_err(mmc_dev(host->mmc), mxs_mmc_probe() 645 host->broken_cd = of_property_read_bool(np, "broken-cd"); mxs_mmc_probe() 665 dev_name(&pdev->dev), host); mxs_mmc_probe() 669 spin_lock_init(&host->lock); mxs_mmc_probe() 675 dev_info(mmc_dev(host->mmc), "initialized\n"); mxs_mmc_probe() 691 struct mxs_mmc_host *host = mmc_priv(mmc); mxs_mmc_remove() local 692 struct mxs_ssp *ssp = &host->ssp; mxs_mmc_remove() 710 struct mxs_mmc_host *host = mmc_priv(mmc); mxs_mmc_suspend() local 711 struct mxs_ssp *ssp = &host->ssp; mxs_mmc_suspend() 720 struct mxs_mmc_host *host = mmc_priv(mmc); mxs_mmc_resume() local 721 struct mxs_ssp *ssp = &host->ssp; mxs_mmc_resume() 224 mxs_mmc_prep_dma( struct mxs_mmc_host *host, unsigned long flags) mxs_mmc_prep_dma() argument
|
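Note on the entry above: mxs_mmc_prep_dma() follows the standard dmaengine slave flow — map the request's scatterlist, ask the channel for a descriptor, attach the completion callback (mxs_mmc_dma_irq_callback), then submit and kick the channel. The kernel-style skeleton below shows that pattern under assumptions: prep_and_submit is a made-up helper name, error handling is minimal, and the PIO command words the mxs driver also pushes through the channel are left out, so treat it as a sketch rather than a drop-in replacement.

    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/mmc/core.h>

    static struct dma_async_tx_descriptor *
    prep_and_submit(struct dma_chan *chan, struct device *dev,
                    struct mmc_data *data, dma_async_tx_callback done, void *arg)
    {
        struct dma_async_tx_descriptor *desc;
        enum dma_data_direction dir =
            (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
        int count;

        count = dma_map_sg(dev, data->sg, data->sg_len, dir);  /* map sglist */
        if (!count)
            return NULL;

        desc = dmaengine_prep_slave_sg(chan, data->sg, count,
                                       (dir == DMA_TO_DEVICE) ?
                                       DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
            dma_unmap_sg(dev, data->sg, data->sg_len, dir);
            return NULL;
        }

        desc->callback = done;           /* e.g. mxs_mmc_dma_irq_callback() */
        desc->callback_param = arg;

        dmaengine_submit(desc);          /* queue the descriptor ...        */
        dma_async_issue_pending(chan);   /* ... and start the channel       */
        return desc;
    }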
H A D | sdhci.h | 2 * linux/drivers/mmc/host/sdhci.h - Secure Digital Host Controller Interface driver 21 #include <linux/mmc/host.h> 374 /* Controller is missing device caps. Use caps provided by host */ 389 /* The system physically doesn't support 1.8v, even if the host does */ 393 /* Controller has a non-standard host control register */ 506 unsigned int tuning_mode; /* Re-tuning mode supported by host */ 515 u32 (*read_l)(struct sdhci_host *host, int reg); 516 u16 (*read_w)(struct sdhci_host *host, int reg); 517 u8 (*read_b)(struct sdhci_host *host, int reg); 518 void (*write_l)(struct sdhci_host *host, u32 val, int reg); 519 void (*write_w)(struct sdhci_host *host, u16 val, int reg); 520 void (*write_b)(struct sdhci_host *host, u8 val, int reg); 523 void (*set_clock)(struct sdhci_host *host, unsigned int clock); 525 int (*enable_dma)(struct sdhci_host *host); 526 unsigned int (*get_max_clock)(struct sdhci_host *host); 527 unsigned int (*get_min_clock)(struct sdhci_host *host); 528 unsigned int (*get_timeout_clock)(struct sdhci_host *host); 529 unsigned int (*get_max_timeout_count)(struct sdhci_host *host); 530 void (*set_timeout)(struct sdhci_host *host, 532 void (*set_bus_width)(struct sdhci_host *host, int width); 533 void (*platform_send_init_74_clocks)(struct sdhci_host *host, 535 unsigned int (*get_ro)(struct sdhci_host *host); 536 void (*reset)(struct sdhci_host *host, u8 mask); 537 int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode); 538 void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); 539 void (*hw_reset)(struct sdhci_host *host); 540 void (*adma_workaround)(struct sdhci_host *host, u32 intmask); 541 void (*platform_init)(struct sdhci_host *host); 542 void (*card_event)(struct sdhci_host *host); 543 void (*voltage_switch)(struct sdhci_host *host); 548 static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg) sdhci_writel() argument 550 if (unlikely(host->ops->write_l)) sdhci_writel() 551 host->ops->write_l(host, val, reg); sdhci_writel() 553 writel(val, host->ioaddr + reg); sdhci_writel() 556 static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg) sdhci_writew() argument 558 if (unlikely(host->ops->write_w)) sdhci_writew() 559 host->ops->write_w(host, val, reg); sdhci_writew() 561 writew(val, host->ioaddr + reg); sdhci_writew() 564 static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg) sdhci_writeb() argument 566 if (unlikely(host->ops->write_b)) sdhci_writeb() 567 host->ops->write_b(host, val, reg); sdhci_writeb() 569 writeb(val, host->ioaddr + reg); sdhci_writeb() 572 static inline u32 sdhci_readl(struct sdhci_host *host, int reg) sdhci_readl() argument 574 if (unlikely(host->ops->read_l)) sdhci_readl() 575 return host->ops->read_l(host, reg); sdhci_readl() 577 return readl(host->ioaddr + reg); sdhci_readl() 580 static inline u16 sdhci_readw(struct sdhci_host *host, int reg) sdhci_readw() argument 582 if (unlikely(host->ops->read_w)) sdhci_readw() 583 return host->ops->read_w(host, reg); sdhci_readw() 585 return readw(host->ioaddr + reg); sdhci_readw() 588 static inline u8 sdhci_readb(struct sdhci_host *host, int reg) sdhci_readb() argument 590 if (unlikely(host->ops->read_b)) sdhci_readb() 591 return host->ops->read_b(host, reg); sdhci_readb() 593 return readb(host->ioaddr + reg); sdhci_readb() 598 static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg) sdhci_writel() argument 600 writel(val, host->ioaddr + reg); sdhci_writel() 603 static inline void 
sdhci_writew(struct sdhci_host *host, u16 val, int reg) sdhci_writew() argument 605 writew(val, host->ioaddr + reg); sdhci_writew() 608 static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg) sdhci_writeb() argument 610 writeb(val, host->ioaddr + reg); sdhci_writeb() 613 static inline u32 sdhci_readl(struct sdhci_host *host, int reg) sdhci_readl() argument 615 return readl(host->ioaddr + reg); sdhci_readl() 618 static inline u16 sdhci_readw(struct sdhci_host *host, int reg) sdhci_readw() argument 620 return readw(host->ioaddr + reg); sdhci_readw() 623 static inline u8 sdhci_readb(struct sdhci_host *host, int reg) sdhci_readb() argument 625 return readb(host->ioaddr + reg); sdhci_readb() 632 extern void sdhci_free_host(struct sdhci_host *host); 634 static inline void *sdhci_priv(struct sdhci_host *host) sdhci_priv() argument 636 return (void *)host->private; sdhci_priv() 639 extern void sdhci_card_detect(struct sdhci_host *host); 640 extern int sdhci_add_host(struct sdhci_host *host); 641 extern void sdhci_remove_host(struct sdhci_host *host, int dead); 642 extern void sdhci_send_command(struct sdhci_host *host, 645 static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host) sdhci_sdio_irq_enabled() argument 647 return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED); sdhci_sdio_irq_enabled() 650 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock); 651 void sdhci_set_bus_width(struct sdhci_host *host, int width); 652 void sdhci_reset(struct sdhci_host *host, u8 mask); 653 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); 656 extern int sdhci_suspend_host(struct sdhci_host *host); 657 extern int sdhci_resume_host(struct sdhci_host *host); 658 extern void sdhci_enable_irq_wakeups(struct sdhci_host *host); 659 extern int sdhci_runtime_suspend_host(struct sdhci_host *host); 660 extern int sdhci_runtime_resume_host(struct sdhci_host *host);
|
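Note on the entry above: sdhci_readw()/sdhci_writew() dispatch to host->ops->read_w / write_w when a platform driver provides them and fall back to plain readw()/writew() otherwise. The fragment below is a minimal sketch (not a real driver) of wiring such an override through sdhci_ops and sdhci_pltfm_data; foo_readw and foo_pdata are hypothetical names, and the version fixup is only an example of the kind of correction esdhc_readw_le() performs above.

    #include <linux/io.h>
    #include "sdhci-pltfm.h"

    static u16 foo_readw(struct sdhci_host *host, int reg)
    {
        u16 val = readw(host->ioaddr + reg);

        /* Example fixup: report a sane spec version if the IP lies. */
        if (reg == SDHCI_HOST_VERSION)
            val = (val & ~SDHCI_SPEC_VER_MASK) | SDHCI_SPEC_200;
        return val;
    }

    static const struct sdhci_ops foo_ops = {
        .read_w            = foo_readw,          /* hook only what differs */
        .set_clock         = sdhci_set_clock,
        .set_bus_width     = sdhci_set_bus_width,
        .reset             = sdhci_reset,
        .set_uhs_signaling = sdhci_set_uhs_signaling,
    };

    static const struct sdhci_pltfm_data foo_pdata = {
        .ops = &foo_ops,
    };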
H A D | sdhci-pxav2.c | 28 #include <linux/mmc/host.h> 54 static void pxav2_reset(struct sdhci_host *host, u8 mask) pxav2_reset() argument 56 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); pxav2_reset() 59 sdhci_reset(host, mask); pxav2_reset() 69 tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); pxav2_reset() 77 writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); pxav2_reset() 81 tmp = readw(host->ioaddr + SD_FIFO_PARAM); pxav2_reset() 83 writew(tmp, host->ioaddr + SD_FIFO_PARAM); pxav2_reset() 85 tmp = readw(host->ioaddr + SD_FIFO_PARAM); pxav2_reset() 88 writew(tmp, host->ioaddr + SD_FIFO_PARAM); pxav2_reset() 93 static void pxav2_mmc_set_bus_width(struct sdhci_host *host, int width) pxav2_mmc_set_bus_width() argument 98 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); pxav2_mmc_set_bus_width() 99 tmp = readw(host->ioaddr + SD_CE_ATA_2); pxav2_mmc_set_bus_width() 110 writew(tmp, host->ioaddr + SD_CE_ATA_2); pxav2_mmc_set_bus_width() 111 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); pxav2_mmc_set_bus_width() 169 struct sdhci_host *host = NULL; sdhci_pxav2_probe() local 175 host = sdhci_pltfm_init(pdev, NULL, 0); sdhci_pxav2_probe() 176 if (IS_ERR(host)) sdhci_pxav2_probe() 177 return PTR_ERR(host); sdhci_pxav2_probe() 179 pltfm_host = sdhci_priv(host); sdhci_pxav2_probe() 191 host->quirks = SDHCI_QUIRK_BROKEN_ADMA sdhci_pxav2_probe() 202 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; sdhci_pxav2_probe() 203 host->mmc->caps |= MMC_CAP_NONREMOVABLE; sdhci_pxav2_probe() 208 host->mmc->caps |= MMC_CAP_8_BIT_DATA; sdhci_pxav2_probe() 211 host->quirks |= pdata->quirks; sdhci_pxav2_probe() 213 host->mmc->caps |= pdata->host_caps; sdhci_pxav2_probe() 215 host->mmc->pm_caps |= pdata->pm_caps; sdhci_pxav2_probe() 218 host->ops = &pxav2_sdhci_ops; sdhci_pxav2_probe() 220 ret = sdhci_add_host(host); sdhci_pxav2_probe() 222 dev_err(&pdev->dev, "failed to add host\n"); sdhci_pxav2_probe() 226 platform_set_drvdata(pdev, host); sdhci_pxav2_probe() 240 struct sdhci_host *host = platform_get_drvdata(pdev); sdhci_pxav2_remove() local 241 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_pxav2_remove() 243 sdhci_remove_host(host, 1); sdhci_pxav2_remove()
|
H A D | sdhci-tegra.c | 24 #include <linux/mmc/host.h> 54 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) tegra_sdhci_readw() argument 56 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); tegra_sdhci_readw() 66 return readw(host->ioaddr + reg); tegra_sdhci_readw() 69 static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg) tegra_sdhci_writew() argument 71 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); tegra_sdhci_writew() 83 host->ioaddr + SDHCI_TRANSFER_MODE); tegra_sdhci_writew() 87 writew(val, host->ioaddr + reg); tegra_sdhci_writew() 90 static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg) tegra_sdhci_writel() argument 92 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); tegra_sdhci_writel() 103 writel(val, host->ioaddr + reg); tegra_sdhci_writel() 108 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); tegra_sdhci_writel() 113 writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); tegra_sdhci_writel() 117 static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host) tegra_sdhci_get_ro() argument 119 return mmc_gpio_get_ro(host->mmc); tegra_sdhci_get_ro() 122 static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) tegra_sdhci_reset() argument 124 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); tegra_sdhci_reset() 129 sdhci_reset(host, mask); tegra_sdhci_reset() 134 misc_ctrl = sdhci_readw(host, SDHCI_TEGRA_VENDOR_MISC_CTRL); tegra_sdhci_reset() 145 sdhci_writew(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL); tegra_sdhci_reset() 148 static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width) tegra_sdhci_set_bus_width() argument 152 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); tegra_sdhci_set_bus_width() 153 if ((host->mmc->caps & MMC_CAP_8_BIT_DATA) && tegra_sdhci_set_bus_width() 164 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); tegra_sdhci_set_bus_width() 252 struct sdhci_host *host; sdhci_tegra_probe() local 263 host = sdhci_pltfm_init(pdev, soc_data->pdata, 0); sdhci_tegra_probe() 264 if (IS_ERR(host)) sdhci_tegra_probe() 265 return PTR_ERR(host); sdhci_tegra_probe() 266 pltfm_host = sdhci_priv(host); sdhci_tegra_probe() 270 dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n"); sdhci_tegra_probe() 277 rc = mmc_of_parse(host->mmc); sdhci_tegra_probe() 288 clk = devm_clk_get(mmc_dev(host->mmc), NULL); sdhci_tegra_probe() 290 dev_err(mmc_dev(host->mmc), "clk err\n"); sdhci_tegra_probe() 297 rc = sdhci_add_host(host); sdhci_tegra_probe()
|
H A D | tmio_mmc.c | 2 * linux/drivers/mmc/host/tmio_mmc.c 19 #include <linux/mmc/host.h> 63 struct tmio_mmc_host *host; tmio_mmc_probe() local 93 host = tmio_mmc_host_alloc(pdev); tmio_mmc_probe() 94 if (!host) tmio_mmc_probe() 98 host->bus_shift = resource_size(res) >> 10; tmio_mmc_probe() 100 ret = tmio_mmc_host_probe(host, pdata); tmio_mmc_probe() 105 dev_name(&pdev->dev), host); tmio_mmc_probe() 109 pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), tmio_mmc_probe() 110 (unsigned long)host->ctl, irq); tmio_mmc_probe() 115 tmio_mmc_host_remove(host); tmio_mmc_probe() 117 tmio_mmc_host_free(host); tmio_mmc_probe() 131 struct tmio_mmc_host *host = mmc_priv(mmc); tmio_mmc_remove() local 132 free_irq(platform_get_irq(pdev, 0), host); tmio_mmc_remove() 133 tmio_mmc_host_remove(host); tmio_mmc_remove()
|
H A D | sdhci-iproc.c | 20 #include <linux/mmc/host.h> 39 static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg) sdhci_iproc_readl() argument 41 u32 val = readl(host->ioaddr + reg); sdhci_iproc_readl() 44 mmc_hostname(host->mmc), reg, val); sdhci_iproc_readl() 48 static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg) sdhci_iproc_readw() argument 50 u32 val = sdhci_iproc_readl(host, (reg & ~3)); sdhci_iproc_readw() 55 static u8 sdhci_iproc_readb(struct sdhci_host *host, int reg) sdhci_iproc_readb() argument 57 u32 val = sdhci_iproc_readl(host, (reg & ~3)); sdhci_iproc_readb() 62 static inline void sdhci_iproc_writel(struct sdhci_host *host, u32 val, int reg) sdhci_iproc_writel() argument 65 mmc_hostname(host->mmc), reg, val); sdhci_iproc_writel() 67 writel(val, host->ioaddr + reg); sdhci_iproc_writel() 69 if (host->clock <= 400000) { sdhci_iproc_writel() 71 if (host->clock) sdhci_iproc_writel() 72 udelay((4 * 1000000 + host->clock - 1) / host->clock); sdhci_iproc_writel() 97 static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg) sdhci_iproc_writew() argument 99 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_iproc_writew() 108 sdhci_iproc_writel(host, iproc_host->shadow_blk, sdhci_iproc_writew() 118 oldval = sdhci_iproc_readl(host, (reg & ~3)); sdhci_iproc_writew() 130 sdhci_iproc_writel(host, newval, reg & ~3); sdhci_iproc_writew() 134 static void sdhci_iproc_writeb(struct sdhci_host *host, u8 val, int reg) sdhci_iproc_writeb() argument 136 u32 oldval = sdhci_iproc_readl(host, (reg & ~3)); sdhci_iproc_writeb() 141 sdhci_iproc_writel(host, newval, reg & ~3); sdhci_iproc_writeb() 180 struct sdhci_host *host; sdhci_iproc_probe() local 190 host = sdhci_pltfm_init(pdev, iproc_data->pdata, sizeof(*iproc_host)); sdhci_iproc_probe() 191 if (IS_ERR(host)) sdhci_iproc_probe() 192 return PTR_ERR(host); sdhci_iproc_probe() 194 pltfm_host = sdhci_priv(host); sdhci_iproc_probe() 199 mmc_of_parse(host->mmc); sdhci_iproc_probe() 203 host->mmc->caps |= MMC_CAP_1_8V_DDR; sdhci_iproc_probe() 212 host->caps = iproc_host->data->caps; sdhci_iproc_probe() 213 host->caps1 = iproc_host->data->caps1; sdhci_iproc_probe() 216 return sdhci_add_host(host); sdhci_iproc_probe()
|
H A D | sdhci-of-arasan.c | 39 static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host) sdhci_arasan_get_timeout_clock() argument 43 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_arasan_get_timeout_clock() 45 div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET); sdhci_arasan_get_timeout_clock() 78 struct sdhci_host *host = platform_get_drvdata(pdev); sdhci_arasan_suspend() local 79 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_arasan_suspend() 83 ret = sdhci_suspend_host(host); sdhci_arasan_suspend() 103 struct sdhci_host *host = platform_get_drvdata(pdev); sdhci_arasan_resume() local 104 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_arasan_resume() 121 return sdhci_resume_host(host); sdhci_arasan_resume() 132 struct sdhci_host *host; sdhci_arasan_probe() local 165 host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata, 0); sdhci_arasan_probe() 166 if (IS_ERR(host)) { sdhci_arasan_probe() 167 ret = PTR_ERR(host); sdhci_arasan_probe() 172 pltfm_host = sdhci_priv(host); sdhci_arasan_probe() 176 ret = mmc_of_parse(host->mmc); sdhci_arasan_probe() 182 ret = sdhci_add_host(host); sdhci_arasan_probe() 200 struct sdhci_host *host = platform_get_drvdata(pdev); sdhci_arasan_remove() local 201 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); sdhci_arasan_remove()
|
H A D | sdhci-dove.c | 25 #include <linux/mmc/host.h> 31 static u16 sdhci_dove_readw(struct sdhci_host *host, int reg) sdhci_dove_readw() argument 41 ret = readw(host->ioaddr + reg); sdhci_dove_readw() 46 static u32 sdhci_dove_readl(struct sdhci_host *host, int reg) sdhci_dove_readl() argument 50 ret = readl(host->ioaddr + reg); sdhci_dove_readl() 81 struct sdhci_host *host; sdhci_dove_probe() local 85 host = sdhci_pltfm_init(pdev, &sdhci_dove_pdata, 0); sdhci_dove_probe() 86 if (IS_ERR(host)) sdhci_dove_probe() 87 return PTR_ERR(host); sdhci_dove_probe() 89 pltfm_host = sdhci_priv(host); sdhci_dove_probe() 95 ret = mmc_of_parse(host->mmc); sdhci_dove_probe() 99 ret = sdhci_add_host(host); sdhci_dove_probe()
|
H A D | mmci_qcom_dml.h | 19 int dml_hw_init(struct mmci_host *host, struct device_node *np); 20 void dml_start_xfer(struct mmci_host *host, struct mmc_data *data); 22 static inline int dml_hw_init(struct mmci_host *host, struct device_node *np) dml_hw_init() argument 26 static inline void dml_start_xfer(struct mmci_host *host, struct mmc_data *data) dml_start_xfer() argument
|
H A D | sdhci-of-hlwd.c | 2 * drivers/mmc/host/sdhci-of-hlwd.c 24 #include <linux/mmc/host.h> 36 static void sdhci_hlwd_writel(struct sdhci_host *host, u32 val, int reg) sdhci_hlwd_writel() argument 38 sdhci_be32bs_writel(host, val, reg); sdhci_hlwd_writel() 42 static void sdhci_hlwd_writew(struct sdhci_host *host, u16 val, int reg) sdhci_hlwd_writew() argument 44 sdhci_be32bs_writew(host, val, reg); sdhci_hlwd_writew() 48 static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg) sdhci_hlwd_writeb() argument 50 sdhci_be32bs_writeb(host, val, reg); sdhci_hlwd_writeb()
|
H A D | sdhci-bcm2835.c | 24 #include <linux/mmc/host.h> 57 static void bcm2835_sdhci_writel(struct sdhci_host *host, u32 val, int reg) bcm2835_sdhci_writel() argument 59 writel(val, host->ioaddr + reg); bcm2835_sdhci_writel() 64 static inline u32 bcm2835_sdhci_readl(struct sdhci_host *host, int reg) bcm2835_sdhci_readl() argument 66 u32 val = readl(host->ioaddr + reg); bcm2835_sdhci_readl() 74 static void bcm2835_sdhci_writew(struct sdhci_host *host, u16 val, int reg) bcm2835_sdhci_writew() argument 76 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); bcm2835_sdhci_writew() 79 bcm2835_sdhci_readl(host, reg & ~3); bcm2835_sdhci_writew() 88 bcm2835_sdhci_writel(host, newval, reg & ~3); bcm2835_sdhci_writew() 91 static u16 bcm2835_sdhci_readw(struct sdhci_host *host, int reg) bcm2835_sdhci_readw() argument 93 u32 val = bcm2835_sdhci_readl(host, (reg & ~3)); bcm2835_sdhci_readw() 101 static void bcm2835_sdhci_writeb(struct sdhci_host *host, u8 val, int reg) bcm2835_sdhci_writeb() argument 103 u32 oldval = bcm2835_sdhci_readl(host, reg & ~3); bcm2835_sdhci_writeb() 109 bcm2835_sdhci_writel(host, newval, reg & ~3); bcm2835_sdhci_writeb() 112 static u8 bcm2835_sdhci_readb(struct sdhci_host *host, int reg) bcm2835_sdhci_readb() argument 114 u32 val = bcm2835_sdhci_readl(host, (reg & ~3)); bcm2835_sdhci_readb() 122 static unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host) bcm2835_sdhci_get_min_clock() argument 150 struct sdhci_host *host; bcm2835_sdhci_probe() local 155 host = sdhci_pltfm_init(pdev, &bcm2835_sdhci_pdata, 0); bcm2835_sdhci_probe() 156 if (IS_ERR(host)) bcm2835_sdhci_probe() 157 return PTR_ERR(host); bcm2835_sdhci_probe() 162 dev_err(mmc_dev(host->mmc), bcm2835_sdhci_probe() 167 pltfm_host = sdhci_priv(host); bcm2835_sdhci_probe() 176 return sdhci_add_host(host); bcm2835_sdhci_probe()
|
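Note on the entries above: sdhci-iproc, sdhci-bcm2835 and sdhci-tegra all cope with a register interface where SDHCI_TRANSFER_MODE (0x0c) and SDHCI_COMMAND (0x0e) share one 32-bit word that starts the command when written, so the 16-bit TRANSFER_MODE write is parked in a shadow variable and only pushed out together with the later COMMAND write (the shadow fields are partly elided in these excerpts). The plain C model below illustrates that deferral; it is not driver code, and the register values in main() are arbitrary demo numbers.

    #include <stdio.h>
    #include <stdint.h>

    #define SDHCI_TRANSFER_MODE 0x0c
    #define SDHCI_COMMAND       0x0e

    static uint32_t mmio_word;       /* the command-triggering register word */
    static uint32_t xfer_shadow;     /* software copy of TRANSFER_MODE       */

    static void writew_shadowed(int reg, uint16_t val)
    {
        if (reg == SDHCI_TRANSFER_MODE) {
            xfer_shadow = val;              /* defer: no bus write yet       */
            return;
        }
        if (reg == SDHCI_COMMAND) {
            /* one 32-bit write issues mode + command together */
            mmio_word = ((uint32_t)val << 16) | xfer_shadow;
            return;
        }
        /* other 16-bit registers would use a read-modify-write here */
    }

    int main(void)
    {
        writew_shadowed(SDHCI_TRANSFER_MODE, 0x0023);
        writew_shadowed(SDHCI_COMMAND, 0x113a);
        printf("word @0x0c = 0x%08x\n", mmio_word);
        return 0;
    }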
H A D | sdhci-pci.c | 1 /* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface 22 #include <linux/mmc/host.h> 50 slot->host->caps = ricoh_mmc_probe_slot() 113 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; mrst_hc_probe_slot() 133 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; pch_hc_probe_slot() 142 struct sdhci_host *host = slot->host; sdhci_pci_sd_cd() local 144 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); sdhci_pci_sd_cd() 208 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE; mfd_emmc_probe_slot() 209 slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC | mfd_emmc_probe_slot() 216 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE; mfd_sdio_probe_slot() 254 static void sdhci_pci_int_hw_reset(struct sdhci_host *host) sdhci_pci_int_hw_reset() argument 258 reg = sdhci_readb(host, SDHCI_POWER_CONTROL); sdhci_pci_int_hw_reset() 260 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); sdhci_pci_int_hw_reset() 264 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); sdhci_pci_int_hw_reset() 271 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | byt_emmc_probe_slot() 275 slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ; byt_emmc_probe_slot() 278 slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */ byt_emmc_probe_slot() 284 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | byt_sdio_probe_slot() 292 slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST | byt_sd_probe_slot() 337 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | intel_mrfl_mmc_probe_slot() 453 static void jmicron_enable_mmc(struct sdhci_host *host, int on) jmicron_enable_mmc() argument 457 scratch = readb(host->ioaddr + 0xC0); jmicron_enable_mmc() 464 writeb(scratch, host->ioaddr + 0xC0); jmicron_enable_mmc() 472 version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION); jmicron_probe_slot() 482 slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; jmicron_probe_slot() 487 slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 | jmicron_probe_slot() 490 slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 | jmicron_probe_slot() 500 jmicron_enable_mmc(slot->host, 1); jmicron_probe_slot() 502 slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST; jmicron_probe_slot() 514 jmicron_enable_mmc(slot->host, 0); jmicron_remove_slot() 524 jmicron_enable_mmc(chip->slots[i]->host, 0); jmicron_suspend() 537 jmicron_enable_mmc(chip->slots[i]->host, 1); jmicron_resume() 599 u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV); syskt_probe_slot() 600 u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV); syskt_probe_slot() 606 slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA; syskt_probe_slot() 608 writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA); syskt_probe_slot() 609 writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD); syskt_probe_slot() 613 ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS); syskt_probe_slot() 621 writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD); syskt_probe_slot() 648 slot->host->mmc->caps2 |= MMC_CAP2_HS200; rtsx_probe_slot() 1111 { /* Generic SD host controller */ 1126 static int sdhci_pci_enable_dma(struct sdhci_host *host) sdhci_pci_enable_dma() argument 1132 slot = sdhci_priv(host); sdhci_pci_enable_dma() 1137 (host->flags & SDHCI_USE_SDMA)) { sdhci_pci_enable_dma() 1142 if (host->flags & SDHCI_USE_64_BIT_DMA) { sdhci_pci_enable_dma() 1143 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) { sdhci_pci_enable_dma() 1144 host->flags &= ~SDHCI_USE_64_BIT_DMA; sdhci_pci_enable_dma() 1161 static void 
sdhci_pci_set_bus_width(struct sdhci_host *host, int width) sdhci_pci_set_bus_width() argument 1165 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); sdhci_pci_set_bus_width() 1181 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); sdhci_pci_set_bus_width() 1184 static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host) sdhci_pci_gpio_hw_reset() argument 1186 struct sdhci_pci_slot *slot = sdhci_priv(host); sdhci_pci_gpio_hw_reset() 1199 static void sdhci_pci_hw_reset(struct sdhci_host *host) sdhci_pci_hw_reset() argument 1201 struct sdhci_pci_slot *slot = sdhci_priv(host); sdhci_pci_hw_reset() 1204 slot->hw_reset(host); sdhci_pci_hw_reset() 1242 ret = sdhci_suspend_host(slot->host); sdhci_pci_suspend() 1247 slot_pm_flags = slot->host->mmc->pm_flags; sdhci_pci_suspend() 1249 sdhci_enable_irq_wakeups(slot->host); sdhci_pci_suspend() 1272 sdhci_resume_host(chip->slots[i]->host); sdhci_pci_suspend() 1298 ret = sdhci_resume_host(slot->host); sdhci_pci_resume() 1322 ret = sdhci_runtime_suspend_host(slot->host); sdhci_pci_runtime_suspend() 1338 sdhci_runtime_resume_host(chip->slots[i]->host); sdhci_pci_runtime_suspend() 1364 ret = sdhci_runtime_resume_host(slot->host); sdhci_pci_runtime_resume() 1397 struct sdhci_host *host; sdhci_pci_probe_slot() local 1420 host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot)); sdhci_pci_probe_slot() 1421 if (IS_ERR(host)) { sdhci_pci_probe_slot() 1422 dev_err(&pdev->dev, "cannot allocate host\n"); sdhci_pci_probe_slot() 1423 return ERR_CAST(host); sdhci_pci_probe_slot() 1426 slot = sdhci_priv(host); sdhci_pci_probe_slot() 1429 slot->host = host; sdhci_pci_probe_slot() 1451 host->hw_name = "PCI"; sdhci_pci_probe_slot() 1452 host->ops = &sdhci_pci_ops; sdhci_pci_probe_slot() 1453 host->quirks = chip->quirks; sdhci_pci_probe_slot() 1454 host->quirks2 = chip->quirks2; sdhci_pci_probe_slot() 1456 host->irq = pdev->irq; sdhci_pci_probe_slot() 1458 ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc)); sdhci_pci_probe_slot() 1464 host->ioaddr = pci_ioremap_bar(pdev, bar); sdhci_pci_probe_slot() 1465 if (!host->ioaddr) { sdhci_pci_probe_slot() 1480 slot->host->mmc->caps |= MMC_CAP_HW_RESET; sdhci_pci_probe_slot() 1488 host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ; sdhci_pci_probe_slot() 1489 host->mmc->slotno = slotno; sdhci_pci_probe_slot() 1490 host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP; sdhci_pci_probe_slot() 1493 mmc_gpiod_request_cd(host->mmc, slot->cd_con_id, slot->cd_idx, sdhci_pci_probe_slot() 1499 ret = sdhci_add_host(host); sdhci_pci_probe_slot() 1524 iounmap(host->ioaddr); sdhci_pci_probe_slot() 1534 sdhci_free_host(host); sdhci_pci_probe_slot() 1547 scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS); sdhci_pci_remove_slot() 1551 sdhci_remove_host(slot->host, dead); sdhci_pci_remove_slot() 1564 sdhci_free_host(slot->host); sdhci_pci_remove_slot()
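jmicron_enable_mmc() above is a plain byte-wide read-modify-write of a vendor register at offset 0xC0: read the byte, set or clear the MMC-enable bit depending on `on`, write it back. A stand-alone sketch of that idiom against a simulated register byte; the bit position used here is an assumption for illustration, not the real JMicron layout.

    #include <stdint.h>
    #include <stdio.h>

    #define JM_VENDOR_REG  0xC0          /* offset shown in the listing                 */
    #define JM_MMC_ENABLE  (1u << 0)     /* assumed bit position, for illustration only */

    static uint8_t ioport[0x100];        /* stand-in for the host->ioaddr window */

    static void jmicron_enable_mmc_model(int on)
    {
        uint8_t scratch = ioport[JM_VENDOR_REG];          /* readb  */

        if (on)
            scratch |= JM_MMC_ENABLE;
        else
            scratch &= (uint8_t)~JM_MMC_ENABLE;

        ioport[JM_VENDOR_REG] = scratch;                  /* writeb */
    }

    int main(void)
    {
        jmicron_enable_mmc_model(1);
        printf("reg 0xC0 = %02x\n", ioport[JM_VENDOR_REG]);   /* 01 */
        jmicron_enable_mmc_model(0);
        printf("reg 0xC0 = %02x\n", ioport[JM_VENDOR_REG]);   /* 00 */
        return 0;
    }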
|
/linux-4.1.27/drivers/memstick/host/ |
H A D | jmb38x_ms.c | 157 static unsigned int jmb38x_ms_read_data(struct jmb38x_ms_host *host, jmb38x_ms_read_data() argument 162 while (host->io_pos && length) { jmb38x_ms_read_data() 163 buf[off++] = host->io_word[0] & 0xff; jmb38x_ms_read_data() 164 host->io_word[0] >>= 8; jmb38x_ms_read_data() 166 host->io_pos--; jmb38x_ms_read_data() 172 while (!(STATUS_FIFO_EMPTY & readl(host->addr + STATUS))) { jmb38x_ms_read_data() 175 *(unsigned int *)(buf + off) = __raw_readl(host->addr + DATA); jmb38x_ms_read_data() 181 && !(STATUS_FIFO_EMPTY & readl(host->addr + STATUS))) { jmb38x_ms_read_data() 182 host->io_word[0] = readl(host->addr + DATA); jmb38x_ms_read_data() 183 for (host->io_pos = 4; host->io_pos; --host->io_pos) { jmb38x_ms_read_data() 184 buf[off++] = host->io_word[0] & 0xff; jmb38x_ms_read_data() 185 host->io_word[0] >>= 8; jmb38x_ms_read_data() 195 static unsigned int jmb38x_ms_read_reg_data(struct jmb38x_ms_host *host, jmb38x_ms_read_reg_data() argument 201 while (host->io_pos > 4 && length) { jmb38x_ms_read_reg_data() 202 buf[off++] = host->io_word[0] & 0xff; jmb38x_ms_read_reg_data() 203 host->io_word[0] >>= 8; jmb38x_ms_read_reg_data() 205 host->io_pos--; jmb38x_ms_read_reg_data() 211 while (host->io_pos && length) { jmb38x_ms_read_reg_data() 212 buf[off++] = host->io_word[1] & 0xff; jmb38x_ms_read_reg_data() 213 host->io_word[1] >>= 8; jmb38x_ms_read_reg_data() 215 host->io_pos--; jmb38x_ms_read_reg_data() 221 static unsigned int jmb38x_ms_write_data(struct jmb38x_ms_host *host, jmb38x_ms_write_data() argument 227 if (host->io_pos) { jmb38x_ms_write_data() 228 while (host->io_pos < 4 && length) { jmb38x_ms_write_data() 229 host->io_word[0] |= buf[off++] << (host->io_pos * 8); jmb38x_ms_write_data() 230 host->io_pos++; jmb38x_ms_write_data() 235 if (host->io_pos == 4 jmb38x_ms_write_data() 236 && !(STATUS_FIFO_FULL & readl(host->addr + STATUS))) { jmb38x_ms_write_data() 237 writel(host->io_word[0], host->addr + DATA); jmb38x_ms_write_data() 238 host->io_pos = 0; jmb38x_ms_write_data() 239 host->io_word[0] = 0; jmb38x_ms_write_data() 240 } else if (host->io_pos) { jmb38x_ms_write_data() 247 while (!(STATUS_FIFO_FULL & readl(host->addr + STATUS))) { jmb38x_ms_write_data() 252 host->addr + DATA); jmb38x_ms_write_data() 259 host->io_word[0] |= buf[off + 2] << 16; jmb38x_ms_write_data() 260 host->io_pos++; jmb38x_ms_write_data() 262 host->io_word[0] |= buf[off + 1] << 8; jmb38x_ms_write_data() 263 host->io_pos++; jmb38x_ms_write_data() 265 host->io_word[0] |= buf[off]; jmb38x_ms_write_data() 266 host->io_pos++; jmb38x_ms_write_data() 269 off += host->io_pos; jmb38x_ms_write_data() 274 static unsigned int jmb38x_ms_write_reg_data(struct jmb38x_ms_host *host, jmb38x_ms_write_reg_data() argument 280 while (host->io_pos < 4 && length) { jmb38x_ms_write_reg_data() 281 host->io_word[0] &= ~(0xff << (host->io_pos * 8)); jmb38x_ms_write_reg_data() 282 host->io_word[0] |= buf[off++] << (host->io_pos * 8); jmb38x_ms_write_reg_data() 283 host->io_pos++; jmb38x_ms_write_reg_data() 290 while (host->io_pos < 8 && length) { jmb38x_ms_write_reg_data() 291 host->io_word[1] &= ~(0xff << (host->io_pos * 8)); jmb38x_ms_write_reg_data() 292 host->io_word[1] |= buf[off++] << (host->io_pos * 8); jmb38x_ms_write_reg_data() 293 host->io_pos++; jmb38x_ms_write_reg_data() 300 static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host) jmb38x_ms_transfer_data() argument 309 if (host->req->long_data) { jmb38x_ms_transfer_data() 310 length = host->req->sg.length - host->block_pos; jmb38x_ms_transfer_data() 
311 off = host->req->sg.offset + host->block_pos; jmb38x_ms_transfer_data() 313 length = host->req->data_len - host->block_pos; jmb38x_ms_transfer_data() 320 if (host->req->long_data) { jmb38x_ms_transfer_data() 321 pg = nth_page(sg_page(&host->req->sg), jmb38x_ms_transfer_data() 330 buf = host->req->data + host->block_pos; jmb38x_ms_transfer_data() 331 p_cnt = host->req->data_len - host->block_pos; jmb38x_ms_transfer_data() 334 if (host->req->data_dir == WRITE) jmb38x_ms_transfer_data() 335 t_size = !(host->cmd_flags & REG_DATA) jmb38x_ms_transfer_data() 336 ? jmb38x_ms_write_data(host, buf, p_cnt) jmb38x_ms_transfer_data() 337 : jmb38x_ms_write_reg_data(host, buf, p_cnt); jmb38x_ms_transfer_data() 339 t_size = !(host->cmd_flags & REG_DATA) jmb38x_ms_transfer_data() 340 ? jmb38x_ms_read_data(host, buf, p_cnt) jmb38x_ms_transfer_data() 341 : jmb38x_ms_read_reg_data(host, buf, p_cnt); jmb38x_ms_transfer_data() 343 if (host->req->long_data) { jmb38x_ms_transfer_data() 350 host->block_pos += t_size; jmb38x_ms_transfer_data() 355 if (!length && host->req->data_dir == WRITE) { jmb38x_ms_transfer_data() 356 if (host->cmd_flags & REG_DATA) { jmb38x_ms_transfer_data() 357 writel(host->io_word[0], host->addr + TPC_P0); jmb38x_ms_transfer_data() 358 writel(host->io_word[1], host->addr + TPC_P1); jmb38x_ms_transfer_data() 359 } else if (host->io_pos) { jmb38x_ms_transfer_data() 360 writel(host->io_word[0], host->addr + DATA); jmb38x_ms_transfer_data() 369 struct jmb38x_ms_host *host = memstick_priv(msh); jmb38x_ms_issue_cmd() local 373 if (!(STATUS_HAS_MEDIA & readl(host->addr + STATUS))) { jmb38x_ms_issue_cmd() 375 host->req->error = -ETIME; jmb38x_ms_issue_cmd() 376 return host->req->error; jmb38x_ms_issue_cmd() 379 dev_dbg(&msh->dev, "control %08x\n", readl(host->addr + HOST_CONTROL)); jmb38x_ms_issue_cmd() 380 dev_dbg(&msh->dev, "status %08x\n", readl(host->addr + INT_STATUS)); jmb38x_ms_issue_cmd() 381 dev_dbg(&msh->dev, "hstatus %08x\n", readl(host->addr + STATUS)); jmb38x_ms_issue_cmd() 383 host->cmd_flags = 0; jmb38x_ms_issue_cmd() 384 host->block_pos = 0; jmb38x_ms_issue_cmd() 385 host->io_pos = 0; jmb38x_ms_issue_cmd() 386 host->io_word[0] = 0; jmb38x_ms_issue_cmd() 387 host->io_word[1] = 0; jmb38x_ms_issue_cmd() 389 cmd = host->req->tpc << 16; jmb38x_ms_issue_cmd() 392 if (host->req->data_dir == READ) jmb38x_ms_issue_cmd() 395 if (host->req->need_card_int) { jmb38x_ms_issue_cmd() 396 if (host->ifmode == MEMSTICK_SERIAL) jmb38x_ms_issue_cmd() 402 data = host->req->data; jmb38x_ms_issue_cmd() 405 host->cmd_flags |= DMA_DATA; jmb38x_ms_issue_cmd() 407 if (host->req->long_data) { jmb38x_ms_issue_cmd() 408 data_len = host->req->sg.length; jmb38x_ms_issue_cmd() 410 data_len = host->req->data_len; jmb38x_ms_issue_cmd() 411 host->cmd_flags &= ~DMA_DATA; jmb38x_ms_issue_cmd() 416 host->cmd_flags |= REG_DATA; jmb38x_ms_issue_cmd() 418 host->cmd_flags &= ~DMA_DATA; jmb38x_ms_issue_cmd() 421 if (host->cmd_flags & DMA_DATA) { jmb38x_ms_issue_cmd() 422 if (1 != pci_map_sg(host->chip->pdev, &host->req->sg, 1, jmb38x_ms_issue_cmd() 423 host->req->data_dir == READ jmb38x_ms_issue_cmd() 426 host->req->error = -ENOMEM; jmb38x_ms_issue_cmd() 427 return host->req->error; jmb38x_ms_issue_cmd() 429 data_len = sg_dma_len(&host->req->sg); jmb38x_ms_issue_cmd() 430 writel(sg_dma_address(&host->req->sg), jmb38x_ms_issue_cmd() 431 host->addr + DMA_ADDRESS); jmb38x_ms_issue_cmd() 434 host->addr + BLOCK); jmb38x_ms_issue_cmd() 435 writel(DMA_CONTROL_ENABLE, host->addr + DMA_CONTROL); jmb38x_ms_issue_cmd() 436 } else if 
(!(host->cmd_flags & REG_DATA)) { jmb38x_ms_issue_cmd() 439 host->addr + BLOCK); jmb38x_ms_issue_cmd() 440 t_val = readl(host->addr + INT_STATUS_ENABLE); jmb38x_ms_issue_cmd() 441 t_val |= host->req->data_dir == READ jmb38x_ms_issue_cmd() 445 writel(t_val, host->addr + INT_STATUS_ENABLE); jmb38x_ms_issue_cmd() 446 writel(t_val, host->addr + INT_SIGNAL_ENABLE); jmb38x_ms_issue_cmd() 449 host->cmd_flags |= REG_DATA; jmb38x_ms_issue_cmd() 452 if (host->req->data_dir == WRITE) { jmb38x_ms_issue_cmd() 453 jmb38x_ms_transfer_data(host); jmb38x_ms_issue_cmd() 454 writel(host->io_word[0], host->addr + TPC_P0); jmb38x_ms_issue_cmd() 455 writel(host->io_word[1], host->addr + TPC_P1); jmb38x_ms_issue_cmd() 459 mod_timer(&host->timer, jiffies + host->timeout_jiffies); jmb38x_ms_issue_cmd() 460 writel(HOST_CONTROL_LED | readl(host->addr + HOST_CONTROL), jmb38x_ms_issue_cmd() 461 host->addr + HOST_CONTROL); jmb38x_ms_issue_cmd() 462 host->req->error = 0; jmb38x_ms_issue_cmd() 464 writel(cmd, host->addr + TPC); jmb38x_ms_issue_cmd() 472 struct jmb38x_ms_host *host = memstick_priv(msh); jmb38x_ms_complete_cmd() local 476 del_timer(&host->timer); jmb38x_ms_complete_cmd() 479 readl(host->addr + HOST_CONTROL)); jmb38x_ms_complete_cmd() 481 readl(host->addr + INT_STATUS)); jmb38x_ms_complete_cmd() 482 dev_dbg(&msh->dev, "c hstatus %08x\n", readl(host->addr + STATUS)); jmb38x_ms_complete_cmd() 484 host->req->int_reg = readl(host->addr + STATUS) & 0xff; jmb38x_ms_complete_cmd() 486 writel(0, host->addr + BLOCK); jmb38x_ms_complete_cmd() 487 writel(0, host->addr + DMA_CONTROL); jmb38x_ms_complete_cmd() 489 if (host->cmd_flags & DMA_DATA) { jmb38x_ms_complete_cmd() 490 pci_unmap_sg(host->chip->pdev, &host->req->sg, 1, jmb38x_ms_complete_cmd() 491 host->req->data_dir == READ jmb38x_ms_complete_cmd() 494 t_val = readl(host->addr + INT_STATUS_ENABLE); jmb38x_ms_complete_cmd() 495 if (host->req->data_dir == READ) jmb38x_ms_complete_cmd() 500 writel(t_val, host->addr + INT_STATUS_ENABLE); jmb38x_ms_complete_cmd() 501 writel(t_val, host->addr + INT_SIGNAL_ENABLE); jmb38x_ms_complete_cmd() 504 writel((~HOST_CONTROL_LED) & readl(host->addr + HOST_CONTROL), jmb38x_ms_complete_cmd() 505 host->addr + HOST_CONTROL); jmb38x_ms_complete_cmd() 509 rc = memstick_next_req(msh, &host->req); jmb38x_ms_complete_cmd() 513 rc = memstick_next_req(msh, &host->req); jmb38x_ms_complete_cmd() 515 host->req->error = -ETIME; jmb38x_ms_complete_cmd() 523 struct jmb38x_ms_host *host = memstick_priv(msh); jmb38x_ms_isr() local 526 spin_lock(&host->lock); jmb38x_ms_isr() 527 irq_status = readl(host->addr + INT_STATUS); jmb38x_ms_isr() 528 dev_dbg(&host->chip->pdev->dev, "irq_status = %08x\n", irq_status); jmb38x_ms_isr() 530 spin_unlock(&host->lock); jmb38x_ms_isr() 534 if (host->req) { jmb38x_ms_isr() 537 host->req->error = -EILSEQ; jmb38x_ms_isr() 539 dev_dbg(&host->chip->pdev->dev, "TPC_ERR\n"); jmb38x_ms_isr() 542 host->req->error = -ETIME; jmb38x_ms_isr() 544 if (host->cmd_flags & DMA_DATA) { jmb38x_ms_isr() 546 host->cmd_flags |= FIFO_READY; jmb38x_ms_isr() 550 jmb38x_ms_transfer_data(host); jmb38x_ms_isr() 553 jmb38x_ms_transfer_data(host); jmb38x_ms_isr() 554 host->cmd_flags |= FIFO_READY; jmb38x_ms_isr() 559 host->cmd_flags |= CMD_READY; jmb38x_ms_isr() 560 if (host->cmd_flags & REG_DATA) { jmb38x_ms_isr() 561 if (host->req->data_dir == READ) { jmb38x_ms_isr() 562 host->io_word[0] jmb38x_ms_isr() 563 = readl(host->addr jmb38x_ms_isr() 565 host->io_word[1] jmb38x_ms_isr() 566 = readl(host->addr jmb38x_ms_isr() 568 host->io_pos = 8; 
jmb38x_ms_isr() 570 jmb38x_ms_transfer_data(host); jmb38x_ms_isr() 572 host->cmd_flags |= FIFO_READY; jmb38x_ms_isr() 579 dev_dbg(&host->chip->pdev->dev, "media changed\n"); jmb38x_ms_isr() 583 writel(irq_status, host->addr + INT_STATUS); jmb38x_ms_isr() 585 if (host->req jmb38x_ms_isr() 586 && (((host->cmd_flags & CMD_READY) jmb38x_ms_isr() 587 && (host->cmd_flags & FIFO_READY)) jmb38x_ms_isr() 588 || host->req->error)) jmb38x_ms_isr() 591 spin_unlock(&host->lock); jmb38x_ms_isr() 598 struct jmb38x_ms_host *host = memstick_priv(msh); jmb38x_ms_abort() local 601 dev_dbg(&host->chip->pdev->dev, "abort\n"); jmb38x_ms_abort() 602 spin_lock_irqsave(&host->lock, flags); jmb38x_ms_abort() 603 if (host->req) { jmb38x_ms_abort() 604 host->req->error = -ETIME; jmb38x_ms_abort() 607 spin_unlock_irqrestore(&host->lock, flags); jmb38x_ms_abort() 613 struct jmb38x_ms_host *host = memstick_priv(msh); jmb38x_ms_req_tasklet() local 617 spin_lock_irqsave(&host->lock, flags); jmb38x_ms_req_tasklet() 618 if (!host->req) { jmb38x_ms_req_tasklet() 620 rc = memstick_next_req(msh, &host->req); jmb38x_ms_req_tasklet() 621 dev_dbg(&host->chip->pdev->dev, "tasklet req %d\n", rc); jmb38x_ms_req_tasklet() 624 spin_unlock_irqrestore(&host->lock, flags); jmb38x_ms_req_tasklet() 634 struct jmb38x_ms_host *host = memstick_priv(msh); jmb38x_ms_submit_req() local 636 tasklet_schedule(&host->notify); jmb38x_ms_submit_req() 639 static int jmb38x_ms_reset(struct jmb38x_ms_host *host) jmb38x_ms_reset() argument 644 | readl(host->addr + HOST_CONTROL), jmb38x_ms_reset() 645 host->addr + HOST_CONTROL); jmb38x_ms_reset() 650 & readl(host->addr + HOST_CONTROL))) jmb38x_ms_reset() 655 dev_dbg(&host->chip->pdev->dev, "reset_req timeout\n"); jmb38x_ms_reset() 659 | readl(host->addr + HOST_CONTROL), jmb38x_ms_reset() 660 host->addr + HOST_CONTROL); jmb38x_ms_reset() 665 & readl(host->addr + HOST_CONTROL))) jmb38x_ms_reset() 670 dev_dbg(&host->chip->pdev->dev, "reset timeout\n"); jmb38x_ms_reset() 675 writel(INT_STATUS_ALL, host->addr + INT_SIGNAL_ENABLE); jmb38x_ms_reset() 676 writel(INT_STATUS_ALL, host->addr + INT_STATUS_ENABLE); jmb38x_ms_reset() 684 struct jmb38x_ms_host *host = memstick_priv(msh); jmb38x_ms_set_param() local 685 unsigned int host_ctl = readl(host->addr + HOST_CONTROL); jmb38x_ms_set_param() 692 rc = jmb38x_ms_reset(host); jmb38x_ms_set_param() 699 writel(host_ctl, host->addr + HOST_CONTROL); jmb38x_ms_set_param() 701 writel(host->id ? 
PAD_PU_PD_ON_MS_SOCK1 jmb38x_ms_set_param() 703 host->addr + PAD_PU_PD); jmb38x_ms_set_param() 706 host->addr + PAD_OUTPUT_ENABLE); jmb38x_ms_set_param() 709 dev_dbg(&host->chip->pdev->dev, "power on\n"); jmb38x_ms_set_param() 713 writel(host_ctl, host->addr + HOST_CONTROL); jmb38x_ms_set_param() 714 writel(0, host->addr + PAD_OUTPUT_ENABLE); jmb38x_ms_set_param() 715 writel(PAD_PU_PD_OFF, host->addr + PAD_PU_PD); jmb38x_ms_set_param() 716 dev_dbg(&host->chip->pdev->dev, "power off\n"); jmb38x_ms_set_param() 721 dev_dbg(&host->chip->pdev->dev, jmb38x_ms_set_param() 750 writel(host_ctl, host->addr + HOST_CONTROL); jmb38x_ms_set_param() 751 writel(CLOCK_CONTROL_OFF, host->addr + CLOCK_CONTROL); jmb38x_ms_set_param() 752 writel(clock_ctl, host->addr + CLOCK_CONTROL); jmb38x_ms_set_param() 753 pci_write_config_byte(host->chip->pdev, jmb38x_ms_set_param() 756 host->ifmode = value; jmb38x_ms_set_param() 873 struct jmb38x_ms_host *host; jmb38x_ms_alloc_host() local 880 host = memstick_priv(msh); jmb38x_ms_alloc_host() 881 host->chip = jm; jmb38x_ms_alloc_host() 882 host->addr = ioremap(pci_resource_start(jm->pdev, cnt), jmb38x_ms_alloc_host() 884 if (!host->addr) jmb38x_ms_alloc_host() 887 spin_lock_init(&host->lock); jmb38x_ms_alloc_host() 888 host->id = cnt; jmb38x_ms_alloc_host() 889 snprintf(host->host_id, sizeof(host->host_id), DRIVER_NAME ":slot%d", jmb38x_ms_alloc_host() 890 host->id); jmb38x_ms_alloc_host() 891 host->irq = jm->pdev->irq; jmb38x_ms_alloc_host() 892 host->timeout_jiffies = msecs_to_jiffies(1000); jmb38x_ms_alloc_host() 894 tasklet_init(&host->notify, jmb38x_ms_req_tasklet, (unsigned long)msh); jmb38x_ms_alloc_host() 900 setup_timer(&host->timer, jmb38x_ms_abort, (unsigned long)msh); jmb38x_ms_alloc_host() 902 if (!request_irq(host->irq, jmb38x_ms_isr, IRQF_SHARED, host->host_id, jmb38x_ms_alloc_host() 906 iounmap(host->addr); jmb38x_ms_alloc_host() 914 struct jmb38x_ms_host *host = memstick_priv(msh); jmb38x_ms_free_host() local 916 free_irq(host->irq, msh); jmb38x_ms_free_host() 917 iounmap(host->addr); jmb38x_ms_free_host() 996 struct jmb38x_ms_host *host; jmb38x_ms_remove() local 1004 host = memstick_priv(jm->hosts[cnt]); jmb38x_ms_remove() 1007 tasklet_kill(&host->notify); jmb38x_ms_remove() 1008 writel(0, host->addr + INT_SIGNAL_ENABLE); jmb38x_ms_remove() 1009 writel(0, host->addr + INT_STATUS_ENABLE); jmb38x_ms_remove() 1012 spin_lock_irqsave(&host->lock, flags); jmb38x_ms_remove() 1013 if (host->req) { jmb38x_ms_remove() 1014 host->req->error = -ETIME; jmb38x_ms_remove() 1017 spin_unlock_irqrestore(&host->lock, flags); jmb38x_ms_remove() 1020 dev_dbg(&jm->pdev->dev, "host removed\n"); jmb38x_ms_remove()
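The PIO write path above (jmb38x_ms_write_data) buffers bytes into host->io_word with io_pos counting how many are pending, and only pushes a full 32-bit word to the DATA FIFO once four bytes are assembled and STATUS does not report the FIFO full. A user-space model of just that packing step, with an array standing in for the FIFO and the full check reduced to a capacity test.

    #include <inttypes.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct packer {
        uint32_t io_word;       /* partially assembled word        */
        unsigned int io_pos;    /* bytes currently buffered (0..3) */
        uint32_t fifo[16];      /* stand-in for the DATA register  */
        size_t fifo_fill;
    };

    /* feed bytes in; returns how many input bytes were consumed */
    static size_t push_bytes(struct packer *p, const uint8_t *buf, size_t len)
    {
        size_t off = 0;

        while (off < len) {
            p->io_word |= (uint32_t)buf[off++] << (p->io_pos * 8);
            p->io_pos++;

            if (p->io_pos == 4) {
                if (p->fifo_fill == 16)                 /* FIFO full: keep word buffered */
                    break;
                p->fifo[p->fifo_fill++] = p->io_word;   /* writel(io_word, DATA) */
                p->io_word = 0;
                p->io_pos = 0;
            }
        }
        return off;
    }

    int main(void)
    {
        struct packer p = { 0 };
        uint8_t data[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };

        size_t used = push_bytes(&p, data, sizeof(data));
        printf("consumed %zu, fifo[0]=%08" PRIx32 ", buffered=%u\n",
               used, p.fifo[0], p.io_pos);              /* 6, 44332211, 2 */
        return 0;
    }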
|
H A D | tifm_ms.c | 86 static unsigned int tifm_ms_read_data(struct tifm_ms *host, tifm_ms_read_data() argument 89 struct tifm_dev *sock = host->dev; tifm_ms_read_data() 92 while (host->io_pos && length) { tifm_ms_read_data() 93 buf[off++] = host->io_word & 0xff; tifm_ms_read_data() 94 host->io_word >>= 8; tifm_ms_read_data() 96 host->io_pos--; tifm_ms_read_data() 113 host->io_word = readl(sock->addr + SOCK_MS_DATA); tifm_ms_read_data() 114 for (host->io_pos = 4; host->io_pos; --host->io_pos) { tifm_ms_read_data() 115 buf[off++] = host->io_word & 0xff; tifm_ms_read_data() 116 host->io_word >>= 8; tifm_ms_read_data() 126 static unsigned int tifm_ms_write_data(struct tifm_ms *host, tifm_ms_write_data() argument 129 struct tifm_dev *sock = host->dev; tifm_ms_write_data() 132 if (host->io_pos) { tifm_ms_write_data() 133 while (host->io_pos < 4 && length) { tifm_ms_write_data() 134 host->io_word |= buf[off++] << (host->io_pos * 8); tifm_ms_write_data() 135 host->io_pos++; tifm_ms_write_data() 140 if (host->io_pos == 4 tifm_ms_write_data() 144 writel(host->io_word, sock->addr + SOCK_MS_DATA); tifm_ms_write_data() 145 host->io_pos = 0; tifm_ms_write_data() 146 host->io_word = 0; tifm_ms_write_data() 147 } else if (host->io_pos) { tifm_ms_write_data() 167 host->io_word |= buf[off + 2] << 16; tifm_ms_write_data() 168 host->io_pos++; tifm_ms_write_data() 170 host->io_word |= buf[off + 1] << 8; tifm_ms_write_data() 171 host->io_pos++; tifm_ms_write_data() 173 host->io_word |= buf[off]; tifm_ms_write_data() 174 host->io_pos++; tifm_ms_write_data() 177 off += host->io_pos; tifm_ms_write_data() 182 static unsigned int tifm_ms_transfer_data(struct tifm_ms *host) tifm_ms_transfer_data() argument 184 struct tifm_dev *sock = host->dev; tifm_ms_transfer_data() 192 if (host->req->long_data) { tifm_ms_transfer_data() 193 length = host->req->sg.length - host->block_pos; tifm_ms_transfer_data() 194 off = host->req->sg.offset + host->block_pos; tifm_ms_transfer_data() 196 length = host->req->data_len - host->block_pos; tifm_ms_transfer_data() 200 host->block_pos); tifm_ms_transfer_data() 205 if (host->req->long_data) { tifm_ms_transfer_data() 206 pg = nth_page(sg_page(&host->req->sg), tifm_ms_transfer_data() 215 buf = host->req->data + host->block_pos; tifm_ms_transfer_data() 216 p_cnt = host->req->data_len - host->block_pos; tifm_ms_transfer_data() 219 t_size = host->req->data_dir == WRITE tifm_ms_transfer_data() 220 ? 
tifm_ms_write_data(host, buf, p_cnt) tifm_ms_transfer_data() 221 : tifm_ms_read_data(host, buf, p_cnt); tifm_ms_transfer_data() 223 if (host->req->long_data) { tifm_ms_transfer_data() 230 host->block_pos += t_size; tifm_ms_transfer_data() 236 if (!length && (host->req->data_dir == WRITE)) { tifm_ms_transfer_data() 237 if (host->io_pos) { tifm_ms_transfer_data() 241 writel(host->io_word, sock->addr + SOCK_MS_DATA); tifm_ms_transfer_data() 254 static int tifm_ms_issue_cmd(struct tifm_ms *host) tifm_ms_issue_cmd() argument 256 struct tifm_dev *sock = host->dev; tifm_ms_issue_cmd() 260 host->cmd_flags = 0; tifm_ms_issue_cmd() 261 host->block_pos = 0; tifm_ms_issue_cmd() 262 host->io_pos = 0; tifm_ms_issue_cmd() 263 host->io_word = 0; tifm_ms_issue_cmd() 264 host->cmd_flags = 0; tifm_ms_issue_cmd() 266 data = host->req->data; tifm_ms_issue_cmd() 268 host->use_dma = !no_dma; tifm_ms_issue_cmd() 270 if (host->req->long_data) { tifm_ms_issue_cmd() 271 data_len = host->req->sg.length; tifm_ms_issue_cmd() 273 host->use_dma = 0; tifm_ms_issue_cmd() 275 data_len = host->req->data_len; tifm_ms_issue_cmd() 276 host->use_dma = 0; tifm_ms_issue_cmd() 284 if (host->use_dma) { tifm_ms_issue_cmd() 285 if (1 != tifm_map_sg(sock, &host->req->sg, 1, tifm_ms_issue_cmd() 286 host->req->data_dir == READ tifm_ms_issue_cmd() 289 host->req->error = -ENOMEM; tifm_ms_issue_cmd() 290 return host->req->error; tifm_ms_issue_cmd() 292 data_len = sg_dma_len(&host->req->sg); tifm_ms_issue_cmd() 299 if (host->req->data_dir == WRITE) tifm_ms_issue_cmd() 305 writel(sg_dma_address(&host->req->sg), tifm_ms_issue_cmd() 309 writel(host->mode_mask | TIFM_MS_SYS_FIFO, tifm_ms_issue_cmd() 316 mod_timer(&host->timer, jiffies + host->timeout_jiffies); tifm_ms_issue_cmd() 319 host->req->error = 0; tifm_ms_issue_cmd() 324 if (host->use_dma) tifm_ms_issue_cmd() 331 cmd = (host->req->tpc & 0xf) << 12; tifm_ms_issue_cmd() 339 static void tifm_ms_complete_cmd(struct tifm_ms *host) tifm_ms_complete_cmd() argument 341 struct tifm_dev *sock = host->dev; tifm_ms_complete_cmd() 345 del_timer(&host->timer); tifm_ms_complete_cmd() 347 host->req->int_reg = readl(sock->addr + SOCK_MS_STATUS) & 0xff; tifm_ms_complete_cmd() 348 host->req->int_reg = (host->req->int_reg & 1) tifm_ms_complete_cmd() 349 | ((host->req->int_reg << 4) & 0xe0); tifm_ms_complete_cmd() 355 if (host->use_dma) { tifm_ms_complete_cmd() 356 tifm_unmap_sg(sock, &host->req->sg, 1, tifm_ms_complete_cmd() 357 host->req->data_dir == READ tifm_ms_complete_cmd() 367 rc = memstick_next_req(msh, &host->req); tifm_ms_complete_cmd() 368 } while (!rc && tifm_ms_issue_cmd(host)); tifm_ms_complete_cmd() 371 static int tifm_ms_check_status(struct tifm_ms *host) tifm_ms_check_status() argument 373 if (!host->req->error) { tifm_ms_check_status() 374 if (!(host->cmd_flags & CMD_READY)) tifm_ms_check_status() 376 if (!(host->cmd_flags & FIFO_READY)) tifm_ms_check_status() 378 if (host->req->need_card_int tifm_ms_check_status() 379 && !(host->cmd_flags & CARD_INT)) tifm_ms_check_status() 388 struct tifm_ms *host; tifm_ms_data_event() local 393 host = memstick_priv((struct memstick_host *)tifm_get_drvdata(sock)); tifm_ms_data_event() 398 fifo_status, host_status, host->cmd_flags); tifm_ms_data_event() 400 if (host->req) { tifm_ms_data_event() 401 if (host->use_dma && (fifo_status & 1)) { tifm_ms_data_event() 402 host->cmd_flags |= FIFO_READY; tifm_ms_data_event() 403 rc = tifm_ms_check_status(host); tifm_ms_data_event() 405 if (!host->use_dma && (fifo_status & TIFM_FIFO_MORE)) { tifm_ms_data_event() 406 
if (!tifm_ms_transfer_data(host)) { tifm_ms_data_event() 407 host->cmd_flags |= FIFO_READY; tifm_ms_data_event() 408 rc = tifm_ms_check_status(host); tifm_ms_data_event() 415 tifm_ms_complete_cmd(host); tifm_ms_data_event() 424 struct tifm_ms *host; tifm_ms_card_event() local 429 host = memstick_priv((struct memstick_host *)tifm_get_drvdata(sock)); tifm_ms_card_event() 431 dev_dbg(&sock->dev, "host event: host_status %x, flags %x\n", tifm_ms_card_event() 432 host_status, host->cmd_flags); tifm_ms_card_event() 434 if (host->req) { tifm_ms_card_event() 436 host->req->error = -ETIME; tifm_ms_card_event() 438 host->req->error = -EILSEQ; tifm_ms_card_event() 441 host->cmd_flags |= CMD_READY; tifm_ms_card_event() 444 host->cmd_flags |= CARD_INT; tifm_ms_card_event() 446 rc = tifm_ms_check_status(host); tifm_ms_card_event() 454 tifm_ms_complete_cmd(host); tifm_ms_card_event() 463 struct tifm_ms *host = memstick_priv(msh); tifm_ms_req_tasklet() local 464 struct tifm_dev *sock = host->dev; tifm_ms_req_tasklet() 469 if (!host->req) { tifm_ms_req_tasklet() 470 if (host->eject) { tifm_ms_req_tasklet() 472 rc = memstick_next_req(msh, &host->req); tifm_ms_req_tasklet() 474 host->req->error = -ETIME; tifm_ms_req_tasklet() 481 rc = memstick_next_req(msh, &host->req); tifm_ms_req_tasklet() 482 } while (!rc && tifm_ms_issue_cmd(host)); tifm_ms_req_tasklet() 494 struct tifm_ms *host = memstick_priv(msh); tifm_ms_submit_req() local 496 tasklet_schedule(&host->notify); tifm_ms_submit_req() 503 struct tifm_ms *host = memstick_priv(msh); tifm_ms_set_param() local 504 struct tifm_dev *sock = host->dev; tifm_ms_set_param() 510 host->mode_mask = TIFM_MS_SYS_SRAC | TIFM_MS_SYS_REI; tifm_ms_set_param() 524 host->mode_mask = TIFM_MS_SYS_SRAC | TIFM_MS_SYS_REI; tifm_ms_set_param() 529 host->mode_mask = 0; tifm_ms_set_param() 543 struct tifm_ms *host = (struct tifm_ms *)data; tifm_ms_abort() local 545 dev_dbg(&host->dev->dev, "status %x\n", tifm_ms_abort() 546 readl(host->dev->addr + SOCK_MS_STATUS)); tifm_ms_abort() 550 dev_name(&host->dev->dev), host->req ? host->req->tpc : 0, tifm_ms_abort() 551 host->cmd_flags); tifm_ms_abort() 553 tifm_eject(host->dev); tifm_ms_abort() 559 struct tifm_ms *host; tifm_ms_probe() local 573 host = memstick_priv(msh); tifm_ms_probe() 575 host->dev = sock; tifm_ms_probe() 576 host->timeout_jiffies = msecs_to_jiffies(1000); tifm_ms_probe() 578 setup_timer(&host->timer, tifm_ms_abort, (unsigned long)host); tifm_ms_probe() 579 tasklet_init(&host->notify, tifm_ms_req_tasklet, (unsigned long)msh); tifm_ms_probe() 599 struct tifm_ms *host = memstick_priv(msh); tifm_ms_remove() local 604 tasklet_kill(&host->notify); tifm_ms_remove() 606 host->eject = 1; tifm_ms_remove() 607 if (host->req) { tifm_ms_remove() 608 del_timer(&host->timer); tifm_ms_remove() 612 if (host->use_dma) tifm_ms_remove() 613 tifm_unmap_sg(sock, &host->req->sg, 1, tifm_ms_remove() 614 host->req->data_dir == READ tifm_ms_remove() 617 host->req->error = -ETIME; tifm_ms_remove() 620 rc = memstick_next_req(msh, &host->req); tifm_ms_remove() 622 host->req->error = -ETIME; tifm_ms_remove()
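tifm_ms_read_data() above drains the FIFO in the opposite direction: a word is pulled from SOCK_MS_DATA into host->io_word and then handed out byte by byte while io_pos counts down. A compact stand-alone model of that unpacking, with the hardware FIFO replaced by a source array.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct unpacker {
        uint32_t io_word;
        unsigned int io_pos;       /* bytes still buffered in io_word  */
        const uint32_t *fifo;      /* stand-in for readl(SOCK_MS_DATA) */
        size_t fifo_len, fifo_idx;
    };

    static size_t pull_bytes(struct unpacker *u, uint8_t *buf, size_t len)
    {
        size_t off = 0;

        while (off < len) {
            if (!u->io_pos) {
                if (u->fifo_idx == u->fifo_len)          /* FIFO empty  */
                    break;
                u->io_word = u->fifo[u->fifo_idx++];     /* readl(DATA) */
                u->io_pos = 4;
            }
            buf[off++] = u->io_word & 0xff;              /* low byte first */
            u->io_word >>= 8;
            u->io_pos--;
        }
        return off;
    }

    int main(void)
    {
        const uint32_t words[1] = { 0x44332211 };
        struct unpacker u = { .fifo = words, .fifo_len = 1 };
        uint8_t out[6] = { 0 };

        size_t got = pull_bytes(&u, out, sizeof(out));
        printf("got %zu bytes: %02x %02x %02x %02x\n",
               got, out[0], out[1], out[2], out[3]);     /* 4 bytes: 11 22 33 44 */
        return 0;
    }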
|
H A D | rtsx_pci_ms.c | 45 static inline struct device *ms_dev(struct realtek_pci_ms *host) ms_dev() argument 47 return &(host->pdev->dev); ms_dev() 50 static inline void ms_clear_error(struct realtek_pci_ms *host) ms_clear_error() argument 52 rtsx_pci_write_register(host->pcr, CARD_STOP, ms_clear_error() 58 static void ms_print_debug_regs(struct realtek_pci_ms *host) ms_print_debug_regs() argument 60 struct rtsx_pcr *pcr = host->pcr; ms_print_debug_regs() 64 /* Print MS host internal registers */ ms_print_debug_regs() 74 dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); ms_print_debug_regs() 76 dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); ms_print_debug_regs() 81 #define ms_print_debug_regs(host) 85 static int ms_power_on(struct realtek_pci_ms *host) ms_power_on() argument 87 struct rtsx_pcr *pcr = host->pcr; ms_power_on() 119 static int ms_power_off(struct realtek_pci_ms *host) ms_power_off() argument 121 struct rtsx_pcr *pcr = host->pcr; ms_power_off() 140 static int ms_transfer_data(struct realtek_pci_ms *host, unsigned char data_dir, ms_transfer_data() argument 143 struct rtsx_pcr *pcr = host->pcr; ms_transfer_data() 148 struct memstick_dev *card = host->msh->card; ms_transfer_data() 151 dev_dbg(ms_dev(host), "%s: tpc = 0x%02x, data_dir = %s, length = %d\n", ms_transfer_data() 194 ms_clear_error(host); ms_transfer_data() 211 static int ms_write_bytes(struct realtek_pci_ms *host, u8 tpc, ms_write_bytes() argument 214 struct rtsx_pcr *pcr = host->pcr; ms_write_bytes() 217 dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc); ms_write_bytes() 249 dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val); ms_write_bytes() 254 ms_print_debug_regs(host); ms_write_bytes() 256 ms_clear_error(host); ms_write_bytes() 279 static int ms_read_bytes(struct realtek_pci_ms *host, u8 tpc, ms_read_bytes() argument 282 struct rtsx_pcr *pcr = host->pcr; ms_read_bytes() 286 dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc); ms_read_bytes() 318 dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val); ms_read_bytes() 323 ms_print_debug_regs(host); ms_read_bytes() 325 ms_clear_error(host); ms_read_bytes() 350 static int rtsx_pci_ms_issue_cmd(struct realtek_pci_ms *host) rtsx_pci_ms_issue_cmd() argument 352 struct memstick_request *req = host->req; rtsx_pci_ms_issue_cmd() 356 dev_dbg(ms_dev(host), "%s\n", __func__); rtsx_pci_ms_issue_cmd() 359 if (host->ifmode != MEMSTICK_SERIAL) rtsx_pci_ms_issue_cmd() 364 err = ms_transfer_data(host, req->data_dir, rtsx_pci_ms_issue_cmd() 368 err = ms_read_bytes(host, req->tpc, cfg, rtsx_pci_ms_issue_cmd() 371 err = ms_write_bytes(host, req->tpc, cfg, rtsx_pci_ms_issue_cmd() 378 if (req->need_card_int && (host->ifmode == MEMSTICK_SERIAL)) { rtsx_pci_ms_issue_cmd() 379 err = ms_read_bytes(host, MS_TPC_GET_INT, rtsx_pci_ms_issue_cmd() 386 dev_dbg(ms_dev(host), "int_reg: 0x%02x\n", int_reg); rtsx_pci_ms_issue_cmd() 403 struct realtek_pci_ms *host = container_of(work, rtsx_pci_ms_handle_req() local 405 struct rtsx_pcr *pcr = host->pcr; rtsx_pci_ms_handle_req() 406 struct memstick_host *msh = host->msh; rtsx_pci_ms_handle_req() 413 rtsx_pci_switch_clock(host->pcr, host->clock, host->ssc_depth, rtsx_pci_ms_handle_req() 419 if (!host->req) { rtsx_pci_ms_handle_req() 421 rc = memstick_next_req(msh, &host->req); rtsx_pci_ms_handle_req() 422 dev_dbg(ms_dev(host), "next req %d\n", rc); rtsx_pci_ms_handle_req() 425 host->req->error = rtsx_pci_ms_issue_cmd(host); rtsx_pci_ms_handle_req() 434 struct realtek_pci_ms *host = memstick_priv(msh); rtsx_pci_ms_request() 
local 436 dev_dbg(ms_dev(host), "--> %s\n", __func__); rtsx_pci_ms_request() 438 if (rtsx_pci_card_exclusive_check(host->pcr, RTSX_MS_CARD)) rtsx_pci_ms_request() 441 schedule_work(&host->handle_req); rtsx_pci_ms_request() 447 struct realtek_pci_ms *host = memstick_priv(msh); rtsx_pci_ms_set_param() local 448 struct rtsx_pcr *pcr = host->pcr; rtsx_pci_ms_set_param() 453 dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n", rtsx_pci_ms_set_param() 456 err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_MS_CARD); rtsx_pci_ms_set_param() 463 err = ms_power_on(host); rtsx_pci_ms_set_param() 465 err = ms_power_off(host); rtsx_pci_ms_set_param() 496 host->ssc_depth = ssc_depth; rtsx_pci_ms_set_param() 497 host->clock = clock; rtsx_pci_ms_set_param() 498 host->ifmode = value; rtsx_pci_ms_set_param() 509 struct realtek_pci_ms *host = platform_get_drvdata(pdev); rtsx_pci_ms_suspend() local 510 struct memstick_host *msh = host->msh; rtsx_pci_ms_suspend() 512 dev_dbg(ms_dev(host), "--> %s\n", __func__); rtsx_pci_ms_suspend() 520 struct realtek_pci_ms *host = platform_get_drvdata(pdev); rtsx_pci_ms_resume() local 521 struct memstick_host *msh = host->msh; rtsx_pci_ms_resume() 523 dev_dbg(ms_dev(host), "--> %s\n", __func__); rtsx_pci_ms_resume() 538 struct realtek_pci_ms *host = platform_get_drvdata(pdev); rtsx_pci_ms_card_event() local 540 memstick_detect_change(host->msh); rtsx_pci_ms_card_event() 546 struct realtek_pci_ms *host; rtsx_pci_ms_drv_probe() local 561 msh = memstick_alloc_host(sizeof(*host), &pdev->dev); rtsx_pci_ms_drv_probe() 565 host = memstick_priv(msh); rtsx_pci_ms_drv_probe() 566 host->pcr = pcr; rtsx_pci_ms_drv_probe() 567 host->msh = msh; rtsx_pci_ms_drv_probe() 568 host->pdev = pdev; rtsx_pci_ms_drv_probe() 569 platform_set_drvdata(pdev, host); rtsx_pci_ms_drv_probe() 573 mutex_init(&host->host_mutex); rtsx_pci_ms_drv_probe() 575 INIT_WORK(&host->handle_req, rtsx_pci_ms_handle_req); rtsx_pci_ms_drv_probe() 591 struct realtek_pci_ms *host = platform_get_drvdata(pdev); rtsx_pci_ms_drv_remove() local 596 if (!host) rtsx_pci_ms_drv_remove() 599 pcr = host->pcr; rtsx_pci_ms_drv_remove() 602 msh = host->msh; rtsx_pci_ms_drv_remove() 603 host->eject = true; rtsx_pci_ms_drv_remove() 604 cancel_work_sync(&host->handle_req); rtsx_pci_ms_drv_remove() 606 mutex_lock(&host->host_mutex); rtsx_pci_ms_drv_remove() 607 if (host->req) { rtsx_pci_ms_drv_remove() 614 host->req->error = -ENOMEDIUM; rtsx_pci_ms_drv_remove() 616 rc = memstick_next_req(msh, &host->req); rtsx_pci_ms_drv_remove() 618 host->req->error = -ENOMEDIUM; rtsx_pci_ms_drv_remove() 621 mutex_unlock(&host->host_mutex); rtsx_pci_ms_drv_remove()
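ms_print_debug_regs() above only exists as a real function when register dumping is enabled; otherwise it is stubbed out as an empty macro, so the call sites in ms_write_bytes()/ms_read_bytes() cost nothing in a normal build. A generic sketch of that compile-out pattern; the MS_DEBUG_DUMP switch is made up for the sketch and is not the driver's actual debug option.

    #include <stdio.h>

    #ifdef MS_DEBUG_DUMP
    static void ms_print_debug_regs(int host_id)
    {
        /* in the driver this walks and prints the MS host register window */
        printf("host %d: dumping registers\n", host_id);
    }
    #else
    /* compiles away entirely; callers need no #ifdef of their own */
    #define ms_print_debug_regs(host_id) do { } while (0)
    #endif

    int main(void)
    {
        ms_print_debug_regs(0);    /* no-op unless built with -DMS_DEBUG_DUMP */
        return 0;
    }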
|
H A D | rtsx_usb_ms.c | 54 static inline struct device *ms_dev(struct rtsx_usb_ms *host) ms_dev() argument 56 return &(host->pdev->dev); ms_dev() 59 static inline void ms_clear_error(struct rtsx_usb_ms *host) ms_clear_error() argument 61 struct rtsx_ucr *ucr = host->ucr; ms_clear_error() 72 static void ms_print_debug_regs(struct rtsx_usb_ms *host) ms_print_debug_regs() argument 74 struct rtsx_ucr *ucr = host->ucr; ms_print_debug_regs() 78 /* Print MS host internal registers */ ms_print_debug_regs() 104 dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); ms_print_debug_regs() 106 dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); ms_print_debug_regs() 108 dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); ms_print_debug_regs() 110 dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", CARD_DATA_SOURCE, *(ptr++)); ms_print_debug_regs() 111 dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", CARD_SELECT, *(ptr++)); ms_print_debug_regs() 112 dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", CARD_CLK_EN, *(ptr++)); ms_print_debug_regs() 113 dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", CARD_PWR_CTL, *(ptr++)); ms_print_debug_regs() 118 static void ms_print_debug_regs(struct rtsx_usb_ms *host) ms_print_debug_regs() argument 180 static int ms_power_on(struct rtsx_usb_ms *host) ms_power_on() argument 182 struct rtsx_ucr *ucr = host->ucr; ms_power_on() 185 dev_dbg(ms_dev(host), "%s\n", __func__); ms_power_on() 220 static int ms_power_off(struct rtsx_usb_ms *host) ms_power_off() argument 222 struct rtsx_ucr *ucr = host->ucr; ms_power_off() 225 dev_dbg(ms_dev(host), "%s\n", __func__); ms_power_off() 242 static int ms_transfer_data(struct rtsx_usb_ms *host, unsigned char data_dir, ms_transfer_data() argument 245 struct rtsx_ucr *ucr = host->ucr; ms_transfer_data() 251 struct memstick_dev *card = host->msh->card; ms_transfer_data() 253 dev_dbg(ms_dev(host), "%s: tpc = 0x%02x, data_dir = %s, length = %d\n", ms_transfer_data() 324 ms_clear_error(host); ms_transfer_data() 328 static int ms_write_bytes(struct rtsx_usb_ms *host, u8 tpc, ms_write_bytes() argument 331 struct rtsx_ucr *ucr = host->ucr; ms_write_bytes() 334 dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc); ms_write_bytes() 367 dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val); ms_write_bytes() 372 ms_print_debug_regs(host); ms_write_bytes() 374 ms_clear_error(host); ms_write_bytes() 395 static int ms_read_bytes(struct rtsx_usb_ms *host, u8 tpc, ms_read_bytes() argument 398 struct rtsx_ucr *ucr = host->ucr; ms_read_bytes() 402 dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc); ms_read_bytes() 435 dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val); ms_read_bytes() 437 if (int_reg && (host->ifmode != MEMSTICK_SERIAL)) ms_read_bytes() 440 ms_print_debug_regs(host); ms_read_bytes() 442 ms_clear_error(host); ms_read_bytes() 462 if (int_reg && (host->ifmode != MEMSTICK_SERIAL)) ms_read_bytes() 468 static int rtsx_usb_ms_issue_cmd(struct rtsx_usb_ms *host) rtsx_usb_ms_issue_cmd() argument 470 struct memstick_request *req = host->req; rtsx_usb_ms_issue_cmd() 474 dev_dbg(ms_dev(host), "%s\n", __func__); rtsx_usb_ms_issue_cmd() 477 if (host->ifmode != MEMSTICK_SERIAL) rtsx_usb_ms_issue_cmd() 482 err = ms_transfer_data(host, req->data_dir, rtsx_usb_ms_issue_cmd() 486 err = ms_read_bytes(host, req->tpc, cfg, rtsx_usb_ms_issue_cmd() 489 err = ms_write_bytes(host, req->tpc, cfg, rtsx_usb_ms_issue_cmd() 496 if (host->ifmode == MEMSTICK_SERIAL) { rtsx_usb_ms_issue_cmd() 497 err = ms_read_bytes(host, MS_TPC_GET_INT, rtsx_usb_ms_issue_cmd() 512 
dev_dbg(ms_dev(host), "int_reg: 0x%02x\n", req->int_reg); rtsx_usb_ms_issue_cmd() 520 struct rtsx_usb_ms *host = container_of(work, rtsx_usb_ms_handle_req() local 522 struct rtsx_ucr *ucr = host->ucr; rtsx_usb_ms_handle_req() 523 struct memstick_host *msh = host->msh; rtsx_usb_ms_handle_req() 526 if (!host->req) { rtsx_usb_ms_handle_req() 528 rc = memstick_next_req(msh, &host->req); rtsx_usb_ms_handle_req() 529 dev_dbg(ms_dev(host), "next req %d\n", rc); rtsx_usb_ms_handle_req() 536 host->req->error = -EIO; rtsx_usb_ms_handle_req() 538 host->req->error = rtsx_usb_ms_handle_req() 539 rtsx_usb_ms_issue_cmd(host); rtsx_usb_ms_handle_req() 543 dev_dbg(ms_dev(host), "req result %d\n", rtsx_usb_ms_handle_req() 544 host->req->error); rtsx_usb_ms_handle_req() 553 struct rtsx_usb_ms *host = memstick_priv(msh); rtsx_usb_ms_request() local 555 dev_dbg(ms_dev(host), "--> %s\n", __func__); rtsx_usb_ms_request() 557 if (!host->eject) rtsx_usb_ms_request() 558 schedule_work(&host->handle_req); rtsx_usb_ms_request() 564 struct rtsx_usb_ms *host = memstick_priv(msh); rtsx_usb_ms_set_param() local 565 struct rtsx_ucr *ucr = host->ucr; rtsx_usb_ms_set_param() 570 dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n", rtsx_usb_ms_set_param() 581 if (value == host->power_mode) rtsx_usb_ms_set_param() 585 pm_runtime_get_sync(ms_dev(host)); rtsx_usb_ms_set_param() 586 err = ms_power_on(host); rtsx_usb_ms_set_param() 588 err = ms_power_off(host); rtsx_usb_ms_set_param() 589 if (host->msh->card) rtsx_usb_ms_set_param() 590 pm_runtime_put_noidle(ms_dev(host)); rtsx_usb_ms_set_param() 592 pm_runtime_put(ms_dev(host)); rtsx_usb_ms_set_param() 596 host->power_mode = value; rtsx_usb_ms_set_param() 624 dev_dbg(ms_dev(host), "switch clock failed\n"); rtsx_usb_ms_set_param() 628 host->ssc_depth = ssc_depth; rtsx_usb_ms_set_param() 629 host->clock = clock; rtsx_usb_ms_set_param() 630 host->ifmode = value; rtsx_usb_ms_set_param() 643 dev_dbg(ms_dev(host), "%s: return = %d\n", __func__, err); rtsx_usb_ms_set_param() 650 struct rtsx_usb_ms *host = dev_get_drvdata(dev); rtsx_usb_ms_suspend() local 651 struct memstick_host *msh = host->msh; rtsx_usb_ms_suspend() 653 dev_dbg(ms_dev(host), "--> %s\n", __func__); rtsx_usb_ms_suspend() 661 struct rtsx_usb_ms *host = dev_get_drvdata(dev); rtsx_usb_ms_resume() local 662 struct memstick_host *msh = host->msh; rtsx_usb_ms_resume() 664 dev_dbg(ms_dev(host), "--> %s\n", __func__); rtsx_usb_ms_resume() 673 * successful host addition. It stops while the driver removal function sets 674 * host->eject true. 
678 struct rtsx_usb_ms *host = (struct rtsx_usb_ms *)__host; rtsx_usb_detect_ms_card() local 679 struct rtsx_ucr *ucr = host->ucr; rtsx_usb_detect_ms_card() 701 dev_dbg(ms_dev(host), "MS slot change detected\n"); rtsx_usb_detect_ms_card() 702 memstick_detect_change(host->msh); rtsx_usb_detect_ms_card() 706 if (host->eject) rtsx_usb_detect_ms_card() 712 complete(&host->detect_ms_exit); rtsx_usb_detect_ms_card() 719 struct rtsx_usb_ms *host; rtsx_usb_ms_drv_probe() local 730 msh = memstick_alloc_host(sizeof(*host), &pdev->dev); rtsx_usb_ms_drv_probe() 734 host = memstick_priv(msh); rtsx_usb_ms_drv_probe() 735 host->ucr = ucr; rtsx_usb_ms_drv_probe() 736 host->msh = msh; rtsx_usb_ms_drv_probe() 737 host->pdev = pdev; rtsx_usb_ms_drv_probe() 738 host->power_mode = MEMSTICK_POWER_OFF; rtsx_usb_ms_drv_probe() 739 platform_set_drvdata(pdev, host); rtsx_usb_ms_drv_probe() 741 mutex_init(&host->host_mutex); rtsx_usb_ms_drv_probe() 742 INIT_WORK(&host->handle_req, rtsx_usb_ms_handle_req); rtsx_usb_ms_drv_probe() 744 init_completion(&host->detect_ms_exit); rtsx_usb_ms_drv_probe() 745 host->detect_ms = kthread_create(rtsx_usb_detect_ms_card, host, rtsx_usb_ms_drv_probe() 747 if (IS_ERR(host->detect_ms)) { rtsx_usb_ms_drv_probe() 750 err = PTR_ERR(host->detect_ms); rtsx_usb_ms_drv_probe() 763 wake_up_process(host->detect_ms); rtsx_usb_ms_drv_probe() 772 struct rtsx_usb_ms *host = platform_get_drvdata(pdev); rtsx_usb_ms_drv_remove() local 776 msh = host->msh; rtsx_usb_ms_drv_remove() 777 host->eject = true; rtsx_usb_ms_drv_remove() 778 cancel_work_sync(&host->handle_req); rtsx_usb_ms_drv_remove() 780 mutex_lock(&host->host_mutex); rtsx_usb_ms_drv_remove() 781 if (host->req) { rtsx_usb_ms_drv_remove() 785 host->req->error = -ENOMEDIUM; rtsx_usb_ms_drv_remove() 787 err = memstick_next_req(msh, &host->req); rtsx_usb_ms_drv_remove() 789 host->req->error = -ENOMEDIUM; rtsx_usb_ms_drv_remove() 792 mutex_unlock(&host->host_mutex); rtsx_usb_ms_drv_remove() 794 wait_for_completion(&host->detect_ms_exit); rtsx_usb_ms_drv_remove() 801 if (pm_runtime_active(ms_dev(host))) rtsx_usb_ms_drv_remove() 802 pm_runtime_put(ms_dev(host)); rtsx_usb_ms_drv_remove()
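The card-detect kthread above keeps polling until the remove path sets host->eject, then signals detect_ms_exit so wait_for_completion() in rtsx_usb_ms_drv_remove() can return before teardown continues. A user-space stand-in for that shutdown handshake, using a pthread plus an atomic flag in place of the kthread and completion, with the polling body reduced to a sleep.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_bool eject;          /* host->eject */

    /* stands in for rtsx_usb_detect_ms_card(): poll until told to stop */
    static void *detect_thread(void *arg)
    {
        (void)arg;
        while (!atomic_load(&eject)) {
            /* real driver: read the card-exist bits, report slot changes */
            usleep(10 * 1000);
        }
        return NULL;    /* returning plays the role of complete(&detect_ms_exit) */
    }

    int main(void)
    {
        pthread_t tid;

        pthread_create(&tid, NULL, detect_thread, NULL);
        usleep(50 * 1000);               /* host runs for a while            */

        atomic_store(&eject, true);      /* remove path: host->eject = true  */
        pthread_join(tid, NULL);         /* wait_for_completion() equivalent */
        puts("detect thread stopped, safe to tear down");
        return 0;
    }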
|
/linux-4.1.27/drivers/mmc/core/ |
H A D | pwrseq.h | 12 void (*pre_power_on)(struct mmc_host *host); 13 void (*post_power_on)(struct mmc_host *host); 14 void (*power_off)(struct mmc_host *host); 15 void (*free)(struct mmc_host *host); 24 int mmc_pwrseq_alloc(struct mmc_host *host); 25 void mmc_pwrseq_pre_power_on(struct mmc_host *host); 26 void mmc_pwrseq_post_power_on(struct mmc_host *host); 27 void mmc_pwrseq_power_off(struct mmc_host *host); 28 void mmc_pwrseq_free(struct mmc_host *host); 30 struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host, 32 struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host, 37 static inline int mmc_pwrseq_alloc(struct mmc_host *host) { return 0; } mmc_pwrseq_pre_power_on() argument 38 static inline void mmc_pwrseq_pre_power_on(struct mmc_host *host) {} mmc_pwrseq_post_power_on() argument 39 static inline void mmc_pwrseq_post_power_on(struct mmc_host *host) {} mmc_pwrseq_power_off() argument 40 static inline void mmc_pwrseq_power_off(struct mmc_host *host) {} mmc_pwrseq_free() argument 41 static inline void mmc_pwrseq_free(struct mmc_host *host) {} argument
|
H A D | core.h | 33 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); 34 void mmc_detach_bus(struct mmc_host *host); 36 struct device_node *mmc_of_find_child_device(struct mmc_host *host, 41 void mmc_set_chip_select(struct mmc_host *host, int mode); 42 void mmc_set_clock(struct mmc_host *host, unsigned int hz); 43 void mmc_gate_clock(struct mmc_host *host); 44 void mmc_ungate_clock(struct mmc_host *host); 45 void mmc_set_ungated(struct mmc_host *host); 46 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); 47 void mmc_set_bus_width(struct mmc_host *host, unsigned int width); 48 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); 49 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr); 50 int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage); 51 void mmc_set_timing(struct mmc_host *host, unsigned int timing); 52 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type); 53 void mmc_power_up(struct mmc_host *host, u32 ocr); 54 void mmc_power_off(struct mmc_host *host); 55 void mmc_power_cycle(struct mmc_host *host, u32 ocr); 56 void mmc_set_initial_state(struct mmc_host *host); 69 void mmc_start_host(struct mmc_host *host); 70 void mmc_stop_host(struct mmc_host *host); 72 int _mmc_detect_card_removed(struct mmc_host *host); 74 int mmc_attach_mmc(struct mmc_host *host); 75 int mmc_attach_sd(struct mmc_host *host); 76 int mmc_attach_sdio(struct mmc_host *host); 82 void mmc_add_host_debugfs(struct mmc_host *host); 83 void mmc_remove_host_debugfs(struct mmc_host *host); 88 void mmc_init_context_info(struct mmc_host *host);
|
H A D | host.c | 2 * linux/drivers/mmc/core/host.c 12 * MMC host class device management 26 #include <linux/mmc/host.h> 31 #include "host.h" 42 struct mmc_host *host = cls_dev_to_mmc_host(dev); mmc_host_classdev_release() local 44 idr_remove(&mmc_host_idr, host->index); mmc_host_classdev_release() 46 kfree(host); mmc_host_classdev_release() 68 struct mmc_host *host = cls_dev_to_mmc_host(dev); clkgate_delay_show() local 69 return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay); clkgate_delay_show() 75 struct mmc_host *host = cls_dev_to_mmc_host(dev); clkgate_delay_store() local 81 spin_lock_irqsave(&host->clk_lock, flags); clkgate_delay_store() 82 host->clkgate_delay = value; clkgate_delay_store() 83 spin_unlock_irqrestore(&host->clk_lock, flags); clkgate_delay_store() 88 * Enabling clock gating will make the core call out to the host 94 static void mmc_host_clk_gate_delayed(struct mmc_host *host) mmc_host_clk_gate_delayed() argument 97 unsigned long freq = host->ios.clock; mmc_host_clk_gate_delayed() 103 mmc_hostname(host)); mmc_host_clk_gate_delayed() 111 spin_lock_irqsave(&host->clk_lock, flags); mmc_host_clk_gate_delayed() 118 if (!host->clk_requests) { mmc_host_clk_gate_delayed() 119 spin_unlock_irqrestore(&host->clk_lock, flags); mmc_host_clk_gate_delayed() 121 ndelay(host->clk_delay * tick_ns); mmc_host_clk_gate_delayed() 124 spin_unlock_irqrestore(&host->clk_lock, flags); mmc_host_clk_gate_delayed() 127 mutex_lock(&host->clk_gate_mutex); mmc_host_clk_gate_delayed() 128 spin_lock_irqsave(&host->clk_lock, flags); mmc_host_clk_gate_delayed() 129 if (!host->clk_requests) { mmc_host_clk_gate_delayed() 130 spin_unlock_irqrestore(&host->clk_lock, flags); mmc_host_clk_gate_delayed() 131 /* This will set host->ios.clock to 0 */ mmc_host_clk_gate_delayed() 132 mmc_gate_clock(host); mmc_host_clk_gate_delayed() 133 spin_lock_irqsave(&host->clk_lock, flags); mmc_host_clk_gate_delayed() 134 pr_debug("%s: gated MCI clock\n", mmc_hostname(host)); mmc_host_clk_gate_delayed() 136 spin_unlock_irqrestore(&host->clk_lock, flags); mmc_host_clk_gate_delayed() 137 mutex_unlock(&host->clk_gate_mutex); mmc_host_clk_gate_delayed() 145 struct mmc_host *host = container_of(work, struct mmc_host, mmc_host_clk_gate_work() local 148 mmc_host_clk_gate_delayed(host); mmc_host_clk_gate_work() 153 * @host: host to ungate. 155 * Makes sure the host ios.clock is restored to a non-zero value 159 void mmc_host_clk_hold(struct mmc_host *host) mmc_host_clk_hold() argument 164 cancel_delayed_work_sync(&host->clk_gate_work); mmc_host_clk_hold() 165 mutex_lock(&host->clk_gate_mutex); mmc_host_clk_hold() 166 spin_lock_irqsave(&host->clk_lock, flags); mmc_host_clk_hold() 167 if (host->clk_gated) { mmc_host_clk_hold() 168 spin_unlock_irqrestore(&host->clk_lock, flags); mmc_host_clk_hold() 169 mmc_ungate_clock(host); mmc_host_clk_hold() 170 spin_lock_irqsave(&host->clk_lock, flags); mmc_host_clk_hold() 171 pr_debug("%s: ungated MCI clock\n", mmc_hostname(host)); mmc_host_clk_hold() 173 host->clk_requests++; mmc_host_clk_hold() 174 spin_unlock_irqrestore(&host->clk_lock, flags); mmc_host_clk_hold() 175 mutex_unlock(&host->clk_gate_mutex); mmc_host_clk_hold() 200 * @host: host to gate. 
202 * Calls the host driver with ios.clock set to zero as often as possible 206 void mmc_host_clk_release(struct mmc_host *host) mmc_host_clk_release() argument 210 spin_lock_irqsave(&host->clk_lock, flags); mmc_host_clk_release() 211 host->clk_requests--; mmc_host_clk_release() 212 if (mmc_host_may_gate_card(host->card) && mmc_host_clk_release() 213 !host->clk_requests) mmc_host_clk_release() 214 schedule_delayed_work(&host->clk_gate_work, mmc_host_clk_release() 215 msecs_to_jiffies(host->clkgate_delay)); mmc_host_clk_release() 216 spin_unlock_irqrestore(&host->clk_lock, flags); mmc_host_clk_release() 221 * @host: host to get the clock frequency for. 225 unsigned int mmc_host_clk_rate(struct mmc_host *host) mmc_host_clk_rate() argument 230 spin_lock_irqsave(&host->clk_lock, flags); mmc_host_clk_rate() 231 if (host->clk_gated) mmc_host_clk_rate() 232 freq = host->clk_old; mmc_host_clk_rate() 234 freq = host->ios.clock; mmc_host_clk_rate() 235 spin_unlock_irqrestore(&host->clk_lock, flags); mmc_host_clk_rate() 241 * @host: host with potential clock to control 243 static inline void mmc_host_clk_init(struct mmc_host *host) mmc_host_clk_init() argument 245 host->clk_requests = 0; mmc_host_clk_init() 247 host->clk_delay = 8; mmc_host_clk_init() 252 host->clkgate_delay = 0; mmc_host_clk_init() 253 host->clk_gated = false; mmc_host_clk_init() 254 INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); mmc_host_clk_init() 255 spin_lock_init(&host->clk_lock); mmc_host_clk_init() 256 mutex_init(&host->clk_gate_mutex); mmc_host_clk_init() 261 * @host: host with potential clock to control 263 static inline void mmc_host_clk_exit(struct mmc_host *host) mmc_host_clk_exit() argument 269 if (cancel_delayed_work_sync(&host->clk_gate_work)) mmc_host_clk_exit() 270 mmc_host_clk_gate_delayed(host); mmc_host_clk_exit() 271 if (host->clk_gated) mmc_host_clk_exit() 272 mmc_host_clk_hold(host); mmc_host_clk_exit() 274 WARN_ON(host->clk_requests > 1); mmc_host_clk_exit() 277 static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) mmc_host_clk_sysfs_init() argument 279 host->clkgate_delay_attr.show = clkgate_delay_show; mmc_host_clk_sysfs_init() 280 host->clkgate_delay_attr.store = clkgate_delay_store; mmc_host_clk_sysfs_init() 281 sysfs_attr_init(&host->clkgate_delay_attr.attr); mmc_host_clk_sysfs_init() 282 host->clkgate_delay_attr.attr.name = "clkgate_delay"; mmc_host_clk_sysfs_init() 283 host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR; mmc_host_clk_sysfs_init() 284 if (device_create_file(&host->class_dev, &host->clkgate_delay_attr)) mmc_host_clk_sysfs_init() 286 mmc_hostname(host)); mmc_host_clk_sysfs_init() 290 static inline void mmc_host_clk_init(struct mmc_host *host) mmc_host_clk_init() argument 294 static inline void mmc_host_clk_exit(struct mmc_host *host) mmc_host_clk_exit() argument 298 static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) mmc_host_clk_sysfs_init() argument 305 * mmc_of_parse() - parse host's device-tree node 306 * @host: host whose node should be parsed. 
309 * used to to instantiate and configure this host instance or not, we 310 * parse the properties and set respective generic mmc-host flags and 313 int mmc_of_parse(struct mmc_host *host) mmc_of_parse() argument 321 if (!host->parent || !host->parent->of_node) mmc_of_parse() 324 np = host->parent->of_node; mmc_of_parse() 328 dev_dbg(host->parent, mmc_of_parse() 335 host->caps |= MMC_CAP_8_BIT_DATA; mmc_of_parse() 338 host->caps |= MMC_CAP_4_BIT_DATA; mmc_of_parse() 343 dev_err(host->parent, mmc_of_parse() 349 of_property_read_u32(np, "max-frequency", &host->f_max); mmc_of_parse() 365 host->caps |= MMC_CAP_NONREMOVABLE; mmc_of_parse() 370 host->caps |= MMC_CAP_NEEDS_POLL; mmc_of_parse() 372 ret = mmc_gpiod_request_cd(host, "cd", 0, true, mmc_of_parse() 375 dev_info(host->parent, "Got CD GPIO\n"); mmc_of_parse() 386 * If the capability on the host AND the GPIO line are mmc_of_parse() 391 host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; mmc_of_parse() 397 ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); mmc_of_parse() 399 dev_info(host->parent, "Got WP GPIO\n"); mmc_of_parse() 405 host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; mmc_of_parse() 408 host->caps |= MMC_CAP_SD_HIGHSPEED; mmc_of_parse() 410 host->caps |= MMC_CAP_MMC_HIGHSPEED; mmc_of_parse() 412 host->caps |= MMC_CAP_UHS_SDR12; mmc_of_parse() 414 host->caps |= MMC_CAP_UHS_SDR25; mmc_of_parse() 416 host->caps |= MMC_CAP_UHS_SDR50; mmc_of_parse() 418 host->caps |= MMC_CAP_UHS_SDR104; mmc_of_parse() 420 host->caps |= MMC_CAP_UHS_DDR50; mmc_of_parse() 422 host->caps |= MMC_CAP_POWER_OFF_CARD; mmc_of_parse() 424 host->caps |= MMC_CAP_SDIO_IRQ; mmc_of_parse() 426 host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE; mmc_of_parse() 428 host->pm_caps |= MMC_PM_KEEP_POWER; mmc_of_parse() 430 host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; mmc_of_parse() 432 host->caps |= MMC_CAP_1_8V_DDR; mmc_of_parse() 434 host->caps |= MMC_CAP_1_2V_DDR; mmc_of_parse() 436 host->caps2 |= MMC_CAP2_HS200_1_8V_SDR; mmc_of_parse() 438 host->caps2 |= MMC_CAP2_HS200_1_2V_SDR; mmc_of_parse() 440 host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR; mmc_of_parse() 442 host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR; mmc_of_parse() 444 host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr); mmc_of_parse() 445 if (host->dsr_req && (host->dsr & ~0xffff)) { mmc_of_parse() 446 dev_err(host->parent, mmc_of_parse() 448 host->dsr); mmc_of_parse() 449 host->dsr_req = 0; mmc_of_parse() 452 return mmc_pwrseq_alloc(host); mmc_of_parse() 458 * mmc_alloc_host - initialise the per-host structure. 460 * @dev: pointer to host device model structure 462 * Initialise the per-host structure. 
467 struct mmc_host *host; mmc_alloc_host() local 469 host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL); mmc_alloc_host() 470 if (!host) mmc_alloc_host() 474 host->rescan_disable = 1; mmc_alloc_host() 477 err = idr_alloc(&mmc_host_idr, host, 0, 0, GFP_NOWAIT); mmc_alloc_host() 479 host->index = err; mmc_alloc_host() 483 kfree(host); mmc_alloc_host() 487 dev_set_name(&host->class_dev, "mmc%d", host->index); mmc_alloc_host() 489 host->parent = dev; mmc_alloc_host() 490 host->class_dev.parent = dev; mmc_alloc_host() 491 host->class_dev.class = &mmc_host_class; mmc_alloc_host() 492 device_initialize(&host->class_dev); mmc_alloc_host() 494 if (mmc_gpio_alloc(host)) { mmc_alloc_host() 495 put_device(&host->class_dev); mmc_alloc_host() 499 mmc_host_clk_init(host); mmc_alloc_host() 501 spin_lock_init(&host->lock); mmc_alloc_host() 502 init_waitqueue_head(&host->wq); mmc_alloc_host() 503 INIT_DELAYED_WORK(&host->detect, mmc_rescan); mmc_alloc_host() 505 host->pm_notify.notifier_call = mmc_pm_notify; mmc_alloc_host() 512 host->max_segs = 1; mmc_alloc_host() 513 host->max_seg_size = PAGE_CACHE_SIZE; mmc_alloc_host() 515 host->max_req_size = PAGE_CACHE_SIZE; mmc_alloc_host() 516 host->max_blk_size = 512; mmc_alloc_host() 517 host->max_blk_count = PAGE_CACHE_SIZE / 512; mmc_alloc_host() 519 return host; mmc_alloc_host() 525 * mmc_add_host - initialise host hardware 526 * @host: mmc host 528 * Register the host with the driver model. The host must be 532 int mmc_add_host(struct mmc_host *host) mmc_add_host() argument 536 WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) && mmc_add_host() 537 !host->ops->enable_sdio_irq); mmc_add_host() 539 err = device_add(&host->class_dev); mmc_add_host() 543 led_trigger_register_simple(dev_name(&host->class_dev), &host->led); mmc_add_host() 546 mmc_add_host_debugfs(host); mmc_add_host() 548 mmc_host_clk_sysfs_init(host); mmc_add_host() 550 mmc_start_host(host); mmc_add_host() 551 register_pm_notifier(&host->pm_notify); mmc_add_host() 559 * mmc_remove_host - remove host hardware 560 * @host: mmc host 562 * Unregister and remove all cards associated with this host, 566 void mmc_remove_host(struct mmc_host *host) mmc_remove_host() argument 568 unregister_pm_notifier(&host->pm_notify); mmc_remove_host() 569 mmc_stop_host(host); mmc_remove_host() 572 mmc_remove_host_debugfs(host); mmc_remove_host() 575 device_del(&host->class_dev); mmc_remove_host() 577 led_trigger_unregister_simple(host->led); mmc_remove_host() 579 mmc_host_clk_exit(host); mmc_remove_host() 585 * mmc_free_host - free the host structure 586 * @host: mmc host 588 * Free the host once all references to it have been dropped. 590 void mmc_free_host(struct mmc_host *host) mmc_free_host() argument 592 mmc_pwrseq_free(host); mmc_free_host() 593 put_device(&host->class_dev); mmc_free_host()
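mmc_host_clk_hold()/mmc_host_clk_release() above keep the controller clock alive as long as clk_requests is non-zero: hold cancels any pending gate work and ungates if needed, release decrements the count and, once it reaches zero and the card allows gating, schedules the gate work after clkgate_delay. A stripped-down model of just that reference counting; the clk_lock/clk_gate_mutex locking and the delayed work are omitted, so gating happens immediately here.

    #include <stdbool.h>
    #include <stdio.h>

    struct clk_gate {
        unsigned int requests;    /* host->clk_requests */
        bool gated;               /* host->clk_gated    */
    };

    /* mmc_host_clk_hold(): guarantee the bus clock runs before touching the card */
    static void clk_hold(struct clk_gate *g)
    {
        if (g->gated) {
            g->gated = false;     /* mmc_ungate_clock(): restore ios.clock */
            printf("ungated MCI clock\n");
        }
        g->requests++;
    }

    /* mmc_host_clk_release(): gate when the last user drops its reference
     * (the real code defers this through clk_gate_work after clkgate_delay ms) */
    static void clk_release(struct clk_gate *g)
    {
        if (--g->requests == 0) {
            g->gated = true;      /* mmc_gate_clock(): ios.clock -> 0 */
            printf("gated MCI clock\n");
        }
    }

    int main(void)
    {
        struct clk_gate g = { .requests = 0, .gated = true };

        clk_hold(&g);      /* first user: ungates          */
        clk_hold(&g);      /* nested user: count only      */
        clk_release(&g);   /* one user left: stays ungated */
        clk_release(&g);   /* last user: clock gated again */
        return 0;
    }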
|
H A D | sdio_irq.c | 24 #include <linux/mmc/host.h> 31 static int process_sdio_pending_irqs(struct mmc_host *host) process_sdio_pending_irqs() argument 33 struct mmc_card *card = host->card; process_sdio_pending_irqs() 44 if (func && host->sdio_irq_pending) { process_sdio_pending_irqs() 57 !(host->caps & MMC_CAP_SDIO_IRQ)) { process_sdio_pending_irqs() 92 void sdio_run_irqs(struct mmc_host *host) sdio_run_irqs() argument 94 mmc_claim_host(host); sdio_run_irqs() 95 host->sdio_irq_pending = true; sdio_run_irqs() 96 process_sdio_pending_irqs(host); sdio_run_irqs() 97 mmc_release_host(host); sdio_run_irqs() 103 struct mmc_host *host = _host; sdio_irq_thread() local 112 * aware hosts. One thing that non SDIO host cannot do is sdio_irq_thread() 117 period = (host->caps & MMC_CAP_SDIO_IRQ) ? sdio_irq_thread() 121 mmc_hostname(host), period); sdio_irq_thread() 125 * We claim the host here on drivers behalf for a couple sdio_irq_thread() 134 * holding of the host lock does not cover too much work sdio_irq_thread() 137 ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort); sdio_irq_thread() 140 ret = process_sdio_pending_irqs(host); sdio_irq_thread() 141 host->sdio_irq_pending = false; sdio_irq_thread() 142 mmc_release_host(host); sdio_irq_thread() 160 if (!(host->caps & MMC_CAP_SDIO_IRQ)) { sdio_irq_thread() 171 if (host->caps & MMC_CAP_SDIO_IRQ) { sdio_irq_thread() 172 mmc_host_clk_hold(host); sdio_irq_thread() 173 host->ops->enable_sdio_irq(host, 1); sdio_irq_thread() 174 mmc_host_clk_release(host); sdio_irq_thread() 181 if (host->caps & MMC_CAP_SDIO_IRQ) { sdio_irq_thread() 182 mmc_host_clk_hold(host); sdio_irq_thread() 183 host->ops->enable_sdio_irq(host, 0); sdio_irq_thread() 184 mmc_host_clk_release(host); sdio_irq_thread() 188 mmc_hostname(host), ret); sdio_irq_thread() 195 struct mmc_host *host = card->host; sdio_card_irq_get() local 197 WARN_ON(!host->claimed); sdio_card_irq_get() 199 if (!host->sdio_irqs++) { sdio_card_irq_get() 200 if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) { sdio_card_irq_get() 201 atomic_set(&host->sdio_irq_thread_abort, 0); sdio_card_irq_get() 202 host->sdio_irq_thread = sdio_card_irq_get() 203 kthread_run(sdio_irq_thread, host, sdio_card_irq_get() 204 "ksdioirqd/%s", mmc_hostname(host)); sdio_card_irq_get() 205 if (IS_ERR(host->sdio_irq_thread)) { sdio_card_irq_get() 206 int err = PTR_ERR(host->sdio_irq_thread); sdio_card_irq_get() 207 host->sdio_irqs--; sdio_card_irq_get() 210 } else if (host->caps & MMC_CAP_SDIO_IRQ) { sdio_card_irq_get() 211 mmc_host_clk_hold(host); sdio_card_irq_get() 212 host->ops->enable_sdio_irq(host, 1); sdio_card_irq_get() 213 mmc_host_clk_release(host); sdio_card_irq_get() 222 struct mmc_host *host = card->host; sdio_card_irq_put() local 224 WARN_ON(!host->claimed); sdio_card_irq_put() 225 BUG_ON(host->sdio_irqs < 1); sdio_card_irq_put() 227 if (!--host->sdio_irqs) { sdio_card_irq_put() 228 if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) { sdio_card_irq_put() 229 atomic_set(&host->sdio_irq_thread_abort, 1); sdio_card_irq_put() 230 kthread_stop(host->sdio_irq_thread); sdio_card_irq_put() 231 } else if (host->caps & MMC_CAP_SDIO_IRQ) { sdio_card_irq_put() 232 mmc_host_clk_hold(host); sdio_card_irq_put() 233 host->ops->enable_sdio_irq(host, 0); sdio_card_irq_put() 234 mmc_host_clk_release(host); sdio_card_irq_put() 248 if ((card->host->caps & MMC_CAP_SDIO_IRQ) && sdio_single_irq_set() 249 card->host->sdio_irqs == 1) sdio_single_irq_set() 265 * handler will be called when that IRQ is asserted. The host is always
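sdio_card_irq_get()/sdio_card_irq_put() above start the ksdioirqd thread only when the first SDIO function claims an IRQ and stop it only when the last one releases it, with host->sdio_irqs as the reference count (hosts flagged MMC_CAP2_SDIO_IRQ_NOTHREAD skip the thread and just toggle ops->enable_sdio_irq, which is left out here). The same first-get/last-put control flow in stand-alone C, with a flag standing in for the kthread.

    #include <stdbool.h>
    #include <stdio.h>

    struct irq_users {
        unsigned int sdio_irqs;    /* how many SDIO functions want the IRQ */
        bool thread_running;       /* stands in for host->sdio_irq_thread  */
    };

    static void card_irq_get(struct irq_users *h)
    {
        if (!h->sdio_irqs++) {             /* first user: bring up the worker */
            h->thread_running = true;
            printf("started ksdioirqd\n");
        }
    }

    static void card_irq_put(struct irq_users *h)
    {
        if (!--h->sdio_irqs) {             /* last user: tear the worker down */
            h->thread_running = false;
            printf("stopped ksdioirqd\n");
        }
    }

    int main(void)
    {
        struct irq_users h = { 0 };

        card_irq_get(&h);    /* func 1 registers a handler -> thread starts */
        card_irq_get(&h);    /* func 2 registers -> no new thread           */
        card_irq_put(&h);    /* func 2 releases -> thread keeps running     */
        card_irq_put(&h);    /* func 1 releases -> thread stops             */
        return 0;
    }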
|
H A D | pwrseq.c | 16 #include <linux/mmc/host.h> 22 struct mmc_pwrseq *(*alloc)(struct mmc_host *host, struct device *dev); 50 int mmc_pwrseq_alloc(struct mmc_host *host) mmc_pwrseq_alloc() argument 58 np = of_parse_phandle(host->parent->of_node, "mmc-pwrseq", 0); mmc_pwrseq_alloc() 74 pwrseq = match->alloc(host, &pdev->dev); mmc_pwrseq_alloc() 80 host->pwrseq = pwrseq; mmc_pwrseq_alloc() 81 dev_info(host->parent, "allocated mmc-pwrseq\n"); mmc_pwrseq_alloc() 88 void mmc_pwrseq_pre_power_on(struct mmc_host *host) mmc_pwrseq_pre_power_on() argument 90 struct mmc_pwrseq *pwrseq = host->pwrseq; mmc_pwrseq_pre_power_on() 93 pwrseq->ops->pre_power_on(host); mmc_pwrseq_pre_power_on() 96 void mmc_pwrseq_post_power_on(struct mmc_host *host) mmc_pwrseq_post_power_on() argument 98 struct mmc_pwrseq *pwrseq = host->pwrseq; mmc_pwrseq_post_power_on() 101 pwrseq->ops->post_power_on(host); mmc_pwrseq_post_power_on() 104 void mmc_pwrseq_power_off(struct mmc_host *host) mmc_pwrseq_power_off() argument 106 struct mmc_pwrseq *pwrseq = host->pwrseq; mmc_pwrseq_power_off() 109 pwrseq->ops->power_off(host); mmc_pwrseq_power_off() 112 void mmc_pwrseq_free(struct mmc_host *host) mmc_pwrseq_free() argument 114 struct mmc_pwrseq *pwrseq = host->pwrseq; mmc_pwrseq_free() 117 pwrseq->ops->free(host); mmc_pwrseq_free() 119 host->pwrseq = NULL; mmc_pwrseq_free()
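mmc_pwrseq_alloc() resolves the host's "mmc-pwrseq" phandle, looks the providing platform device up in a small match table, and stores the struct mmc_pwrseq returned by the provider's alloc() callback in host->pwrseq; mmc_power_up()/mmc_power_off() then invoke the pre/post/power-off hooks around VDD changes. Below is a hedged skeleton of what such a provider roughly looks like, assuming the mmc_pwrseq_ops layout used by this file; the "dummy" names are invented (the real providers in this tree are pwrseq_simple.c and pwrseq_emmc.c), and a real provider would also need an entry in the match table inside pwrseq.c.

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/mmc/host.h>
    #include "pwrseq.h"

    struct mmc_pwrseq_dummy {
            struct mmc_pwrseq pwrseq;       /* host->pwrseq ends up pointing here */
    };

    static void mmc_pwrseq_dummy_pre_power_on(struct mmc_host *host)
    {
            /* e.g. assert reset lines / enable an external clock before VDD rises */
    }

    static void mmc_pwrseq_dummy_free(struct mmc_host *host)
    {
            struct mmc_pwrseq_dummy *p =
                    container_of(host->pwrseq, struct mmc_pwrseq_dummy, pwrseq);

            kfree(p);
    }

    static struct mmc_pwrseq_ops mmc_pwrseq_dummy_ops = {
            .pre_power_on   = mmc_pwrseq_dummy_pre_power_on,
            .free           = mmc_pwrseq_dummy_free,
    };

    struct mmc_pwrseq *mmc_pwrseq_dummy_alloc(struct mmc_host *host,
                                              struct device *dev)
    {
            struct mmc_pwrseq_dummy *p = kzalloc(sizeof(*p), GFP_KERNEL);

            if (!p)
                    return ERR_PTR(-ENOMEM);

            p->pwrseq.ops = &mmc_pwrseq_dummy_ops;
            return &p->pwrseq;      /* mmc_pwrseq_alloc() stores this in host->pwrseq */
    }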
|
H A D | core.c | 34 #include <linux/mmc/host.h> 41 #include "host.h" 92 static void mmc_should_fail_request(struct mmc_host *host, mmc_should_fail_request() argument 107 !should_fail(&host->fail_mmc_request, data->blksz * data->blocks)) mmc_should_fail_request() 116 static inline void mmc_should_fail_request(struct mmc_host *host, mmc_should_fail_request() argument 125 * @host: MMC host which completed request 131 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) mmc_request_done() argument 136 if (err && cmd->retries && mmc_host_is_spi(host)) { mmc_request_done() 141 if (err && cmd->retries && !mmc_card_removed(host->card)) { mmc_request_done() 149 mmc_should_fail_request(host, mrq); mmc_request_done() 151 led_trigger_event(host->led, LED_OFF); mmc_request_done() 155 mmc_hostname(host), mrq->sbc->opcode, mmc_request_done() 162 mmc_hostname(host), cmd->opcode, err, mmc_request_done() 168 mmc_hostname(host), mmc_request_done() 174 mmc_hostname(host), mrq->stop->opcode, mmc_request_done() 183 mmc_host_clk_release(host); mmc_request_done() 189 static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) mmc_start_request() argument 195 if (mmc_card_removed(host->card)) mmc_start_request() 200 mmc_hostname(host), mrq->sbc->opcode, mmc_start_request() 205 mmc_hostname(host), mrq->cmd->opcode, mmc_start_request() 211 mmc_hostname(host), mrq->data->blksz, mmc_start_request() 219 mmc_hostname(host), mrq->stop->opcode, mmc_start_request() 223 WARN_ON(!host->claimed); mmc_start_request() 232 BUG_ON(mrq->data->blksz > host->max_blk_size); mmc_start_request() 233 BUG_ON(mrq->data->blocks > host->max_blk_count); mmc_start_request() 235 host->max_req_size); mmc_start_request() 253 mmc_host_clk_hold(host); mmc_start_request() 254 led_trigger_event(host->led, LED_FULL); mmc_start_request() 255 host->ops->request(host, mrq); mmc_start_request() 284 mmc_hostname(card->host), err); mmc_start_bkops() 295 mmc_claim_host(card->host); mmc_start_bkops() 309 mmc_hostname(card->host), err); mmc_start_bkops() 321 mmc_release_host(card->host); mmc_start_bkops() 329 * Wakes up mmc context, passed as a callback to host controller driver 333 struct mmc_context_info *context_info = &mrq->host->context_info; mmc_wait_data_done() 346 * @host: MMC host to start the request 352 static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq) __mmc_start_data_req() argument 357 mrq->host = host; __mmc_start_data_req() 359 err = mmc_start_request(host, mrq); __mmc_start_data_req() 368 static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) __mmc_start_req() argument 375 err = mmc_start_request(host, mrq); __mmc_start_req() 386 * @host: MMC host to prepare the command. 
389 * Blocks MMC context till host controller will ack end of data request 395 static int mmc_wait_for_data_req_done(struct mmc_host *host, mmc_wait_for_data_req_done() argument 400 struct mmc_context_info *context_info = &host->context_info; mmc_wait_for_data_req_done() 417 mmc_card_removed(host->card)) { mmc_wait_for_data_req_done() 418 err = host->areq->err_check(host->card, mmc_wait_for_data_req_done() 419 host->areq); mmc_wait_for_data_req_done() 423 mmc_hostname(host), mmc_wait_for_data_req_done() 427 host->ops->request(host, mrq); mmc_wait_for_data_req_done() 441 static void mmc_wait_for_req_done(struct mmc_host *host, mmc_wait_for_req_done() argument 452 * If host has timed out waiting for the sanitize mmc_wait_for_req_done() 458 if (!mmc_interrupt_hpi(host->card)) { mmc_wait_for_req_done() 460 mmc_hostname(host), __func__); mmc_wait_for_req_done() 465 mmc_hostname(host), __func__); mmc_wait_for_req_done() 469 mmc_card_removed(host->card)) mmc_wait_for_req_done() 473 mmc_hostname(host), cmd->opcode, cmd->error); mmc_wait_for_req_done() 476 host->ops->request(host, mrq); mmc_wait_for_req_done() 482 * @host: MMC host to prepare command 488 * host prepare for the new request. Preparation of a request may be 489 * performed while another request is running on the host. 491 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq, mmc_pre_req() argument 494 if (host->ops->pre_req) { mmc_pre_req() 495 mmc_host_clk_hold(host); mmc_pre_req() 496 host->ops->pre_req(host, mrq, is_first_req); mmc_pre_req() 497 mmc_host_clk_release(host); mmc_pre_req() 503 * @host: MMC host to post process command 507 * Let the host post process a completed request. Post processing of 510 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq, mmc_post_req() argument 513 if (host->ops->post_req) { mmc_post_req() 514 mmc_host_clk_hold(host); mmc_post_req() 515 host->ops->post_req(host, mrq, err); mmc_post_req() 516 mmc_host_clk_release(host); mmc_post_req() 522 * @host: MMC host to start command 526 * Start a new MMC custom command request for a host. 
536 struct mmc_async_req *mmc_start_req(struct mmc_host *host, mmc_start_req() argument 541 struct mmc_async_req *data = host->areq; mmc_start_req() 545 mmc_pre_req(host, areq->mrq, !host->areq); mmc_start_req() 547 if (host->areq) { mmc_start_req() 548 err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq); mmc_start_req() 561 if (host->card && mmc_card_mmc(host->card) && mmc_start_req() 562 ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) || mmc_start_req() 563 (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) && mmc_start_req() 564 (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) { mmc_start_req() 568 mmc_post_req(host, areq->mrq, -EINVAL); mmc_start_req() 570 mmc_start_bkops(host->card, true); mmc_start_req() 574 mmc_pre_req(host, areq->mrq, !host->areq); mmc_start_req() 579 start_err = __mmc_start_data_req(host, areq->mrq); mmc_start_req() 581 if (host->areq) mmc_start_req() 582 mmc_post_req(host, host->areq->mrq, 0); mmc_start_req() 586 mmc_post_req(host, areq->mrq, -EINVAL); mmc_start_req() 589 host->areq = NULL; mmc_start_req() 591 host->areq = areq; mmc_start_req() 601 * @host: MMC host to start command 604 * Start a new MMC custom command request for a host, and wait 608 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) mmc_wait_for_req() argument 610 __mmc_start_req(host, mrq); mmc_wait_for_req() 611 mmc_wait_for_req_done(host, mrq); mmc_wait_for_req() 631 pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host)); mmc_interrupt_hpi() 635 mmc_claim_host(card->host); mmc_interrupt_hpi() 638 pr_err("%s: Get card status fail\n", mmc_hostname(card->host)); mmc_interrupt_hpi() 657 mmc_hostname(card->host), R1_CURRENT_STATE(status)); mmc_interrupt_hpi() 677 mmc_release_host(card->host); mmc_interrupt_hpi() 684 * @host: MMC host to start command 688 * Start a new MMC command for a host, and wait for the command 692 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) mmc_wait_for_cmd() argument 696 WARN_ON(!host->claimed); mmc_wait_for_cmd() 704 mmc_wait_for_req(host, &mrq); mmc_wait_for_cmd() 745 mmc_claim_host(card->host); mmc_read_bkops_status() 747 mmc_release_host(card->host); mmc_read_bkops_status() 801 if (mmc_host_clk_rate(card->host)) mmc_set_data_timeout() 803 (mmc_host_clk_rate(card->host) / 1000); mmc_set_data_timeout() 848 if (mmc_host_is_spi(card->host)) { mmc_set_data_timeout() 888 * __mmc_claim_host - exclusively claim a host 889 * @host: mmc host to claim 892 * Claim a host for a set of operations. 
If @abort is non null and 897 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) __mmc_claim_host() argument 906 add_wait_queue(&host->wq, &wait); __mmc_claim_host() 907 spin_lock_irqsave(&host->lock, flags); __mmc_claim_host() 911 if (stop || !host->claimed || host->claimer == current) __mmc_claim_host() 913 spin_unlock_irqrestore(&host->lock, flags); __mmc_claim_host() 915 spin_lock_irqsave(&host->lock, flags); __mmc_claim_host() 919 host->claimed = 1; __mmc_claim_host() 920 host->claimer = current; __mmc_claim_host() 921 host->claim_cnt += 1; __mmc_claim_host() 922 if (host->claim_cnt == 1) __mmc_claim_host() 925 wake_up(&host->wq); __mmc_claim_host() 926 spin_unlock_irqrestore(&host->lock, flags); __mmc_claim_host() 927 remove_wait_queue(&host->wq, &wait); __mmc_claim_host() 930 pm_runtime_get_sync(mmc_dev(host)); __mmc_claim_host() 937 * mmc_release_host - release a host 938 * @host: mmc host to release 940 * Release a MMC host, allowing others to claim the host 943 void mmc_release_host(struct mmc_host *host) mmc_release_host() argument 947 WARN_ON(!host->claimed); mmc_release_host() 949 spin_lock_irqsave(&host->lock, flags); mmc_release_host() 950 if (--host->claim_cnt) { mmc_release_host() 952 spin_unlock_irqrestore(&host->lock, flags); mmc_release_host() 954 host->claimed = 0; mmc_release_host() 955 host->claimer = NULL; mmc_release_host() 956 spin_unlock_irqrestore(&host->lock, flags); mmc_release_host() 957 wake_up(&host->wq); mmc_release_host() 958 pm_runtime_mark_last_busy(mmc_dev(host)); mmc_release_host() 959 pm_runtime_put_autosuspend(mmc_dev(host)); mmc_release_host() 966 * card device and also claims the host. 971 mmc_claim_host(card->host); mmc_get_card() 976 * This is a helper function, which releases the host and drops the runtime 981 mmc_release_host(card->host); mmc_put_card() 988 * Internal function that does the actual ios call to the host driver, 991 static inline void mmc_set_ios(struct mmc_host *host) mmc_set_ios() argument 993 struct mmc_ios *ios = &host->ios; mmc_set_ios() 997 mmc_hostname(host), ios->clock, ios->bus_mode, mmc_set_ios() 1002 mmc_set_ungated(host); mmc_set_ios() 1003 host->ops->set_ios(host, ios); mmc_set_ios() 1007 * Control chip select pin on a host. 
1009 void mmc_set_chip_select(struct mmc_host *host, int mode) mmc_set_chip_select() argument 1011 mmc_host_clk_hold(host); mmc_set_chip_select() 1012 host->ios.chip_select = mode; mmc_set_chip_select() 1013 mmc_set_ios(host); mmc_set_chip_select() 1014 mmc_host_clk_release(host); mmc_set_chip_select() 1018 * Sets the host clock to the highest possible frequency that 1021 static void __mmc_set_clock(struct mmc_host *host, unsigned int hz) __mmc_set_clock() argument 1023 WARN_ON(hz && hz < host->f_min); __mmc_set_clock() 1025 if (hz > host->f_max) __mmc_set_clock() 1026 hz = host->f_max; __mmc_set_clock() 1028 host->ios.clock = hz; __mmc_set_clock() 1029 mmc_set_ios(host); __mmc_set_clock() 1032 void mmc_set_clock(struct mmc_host *host, unsigned int hz) mmc_set_clock() argument 1034 mmc_host_clk_hold(host); mmc_set_clock() 1035 __mmc_set_clock(host, hz); mmc_set_clock() 1036 mmc_host_clk_release(host); mmc_set_clock() 1043 void mmc_gate_clock(struct mmc_host *host) mmc_gate_clock() argument 1047 spin_lock_irqsave(&host->clk_lock, flags); mmc_gate_clock() 1048 host->clk_old = host->ios.clock; mmc_gate_clock() 1049 host->ios.clock = 0; mmc_gate_clock() 1050 host->clk_gated = true; mmc_gate_clock() 1051 spin_unlock_irqrestore(&host->clk_lock, flags); mmc_gate_clock() 1052 mmc_set_ios(host); mmc_gate_clock() 1059 void mmc_ungate_clock(struct mmc_host *host) mmc_ungate_clock() argument 1068 if (host->clk_old) { mmc_ungate_clock() 1069 BUG_ON(host->ios.clock); mmc_ungate_clock() 1070 /* This call will also set host->clk_gated to false */ mmc_ungate_clock() 1071 __mmc_set_clock(host, host->clk_old); mmc_ungate_clock() 1075 void mmc_set_ungated(struct mmc_host *host) mmc_set_ungated() argument 1083 spin_lock_irqsave(&host->clk_lock, flags); mmc_set_ungated() 1084 host->clk_gated = false; mmc_set_ungated() 1085 spin_unlock_irqrestore(&host->clk_lock, flags); mmc_set_ungated() 1089 void mmc_set_ungated(struct mmc_host *host) mmc_set_ungated() argument 1096 struct mmc_host *host = card->host; mmc_execute_tuning() local 1100 if (!host->ops->execute_tuning) mmc_execute_tuning() 1108 mmc_host_clk_hold(host); mmc_execute_tuning() 1109 err = host->ops->execute_tuning(host, opcode); mmc_execute_tuning() 1110 mmc_host_clk_release(host); mmc_execute_tuning() 1113 pr_err("%s: tuning execution failed\n", mmc_hostname(host)); mmc_execute_tuning() 1119 * Change the bus mode (open drain/push-pull) of a host. 1121 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) mmc_set_bus_mode() argument 1123 mmc_host_clk_hold(host); mmc_set_bus_mode() 1124 host->ios.bus_mode = mode; mmc_set_bus_mode() 1125 mmc_set_ios(host); mmc_set_bus_mode() 1126 mmc_host_clk_release(host); mmc_set_bus_mode() 1130 * Change data bus width of a host. 
1132 void mmc_set_bus_width(struct mmc_host *host, unsigned int width) mmc_set_bus_width() argument 1134 mmc_host_clk_hold(host); mmc_set_bus_width() 1135 host->ios.bus_width = width; mmc_set_bus_width() 1136 mmc_set_ios(host); mmc_set_bus_width() 1137 mmc_host_clk_release(host); mmc_set_bus_width() 1143 void mmc_set_initial_state(struct mmc_host *host) mmc_set_initial_state() argument 1145 if (mmc_host_is_spi(host)) mmc_set_initial_state() 1146 host->ios.chip_select = MMC_CS_HIGH; mmc_set_initial_state() 1148 host->ios.chip_select = MMC_CS_DONTCARE; mmc_set_initial_state() 1149 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; mmc_set_initial_state() 1150 host->ios.bus_width = MMC_BUS_WIDTH_1; mmc_set_initial_state() 1151 host->ios.timing = MMC_TIMING_LEGACY; mmc_set_initial_state() 1153 mmc_set_ios(host); mmc_set_initial_state() 1285 struct device_node *mmc_of_find_child_device(struct mmc_host *host, mmc_of_find_child_device() argument 1290 if (!host->parent || !host->parent->of_node) mmc_of_find_child_device() 1293 for_each_child_of_node(host->parent->of_node, node) { mmc_of_find_child_device() 1310 * MMC host adapter. 1347 * mmc_regulator_set_ocr - set regulator to match host->ios voltage 1348 * @mmc: the host to regulate 1350 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd) 1354 * MMC host drivers may use this to enable or disable a regulator using 1438 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) mmc_select_voltage() argument 1447 dev_warn(mmc_dev(host), mmc_select_voltage() 1452 ocr &= host->ocr_avail; mmc_select_voltage() 1454 dev_warn(mmc_dev(host), "no support for card's volts\n"); mmc_select_voltage() 1458 if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) { mmc_select_voltage() 1461 mmc_power_cycle(host, ocr); mmc_select_voltage() 1465 if (bit != host->ios.vdd) mmc_select_voltage() 1466 dev_warn(mmc_dev(host), "exceeding card's volts\n"); mmc_select_voltage() 1472 int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage) __mmc_set_signal_voltage() argument 1475 int old_signal_voltage = host->ios.signal_voltage; __mmc_set_signal_voltage() 1477 host->ios.signal_voltage = signal_voltage; __mmc_set_signal_voltage() 1478 if (host->ops->start_signal_voltage_switch) { __mmc_set_signal_voltage() 1479 mmc_host_clk_hold(host); __mmc_set_signal_voltage() 1480 err = host->ops->start_signal_voltage_switch(host, &host->ios); __mmc_set_signal_voltage() 1481 mmc_host_clk_release(host); __mmc_set_signal_voltage() 1485 host->ios.signal_voltage = old_signal_voltage; __mmc_set_signal_voltage() 1491 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr) mmc_set_signal_voltage() argument 1497 BUG_ON(!host); mmc_set_signal_voltage() 1504 return __mmc_set_signal_voltage(host, signal_voltage); mmc_set_signal_voltage() 1510 if (!host->ops->start_signal_voltage_switch) mmc_set_signal_voltage() 1512 if (!host->ops->card_busy) mmc_set_signal_voltage() 1514 mmc_hostname(host)); mmc_set_signal_voltage() 1516 mmc_host_clk_hold(host); mmc_set_signal_voltage() 1522 err = mmc_wait_for_cmd(host, &cmd, 0); mmc_set_signal_voltage() 1526 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) { mmc_set_signal_voltage() 1535 if (host->ops->card_busy && !host->ops->card_busy(host)) { mmc_set_signal_voltage() 1543 clock = host->ios.clock; mmc_set_signal_voltage() 1544 host->ios.clock = 0; mmc_set_signal_voltage() 1545 mmc_set_ios(host); mmc_set_signal_voltage() 1547 if (__mmc_set_signal_voltage(host, signal_voltage)) { mmc_set_signal_voltage() 1558 host->ios.clock = 
clock; mmc_set_signal_voltage() 1559 mmc_set_ios(host); mmc_set_signal_voltage() 1568 if (host->ops->card_busy && host->ops->card_busy(host)) mmc_set_signal_voltage() 1574 "power cycling card\n", mmc_hostname(host)); mmc_set_signal_voltage() 1575 mmc_power_cycle(host, ocr); mmc_set_signal_voltage() 1579 mmc_host_clk_release(host); mmc_set_signal_voltage() 1585 * Select timing parameters for host. 1587 void mmc_set_timing(struct mmc_host *host, unsigned int timing) mmc_set_timing() argument 1589 mmc_host_clk_hold(host); mmc_set_timing() 1590 host->ios.timing = timing; mmc_set_timing() 1591 mmc_set_ios(host); mmc_set_timing() 1592 mmc_host_clk_release(host); mmc_set_timing() 1596 * Select appropriate driver type for host. 1598 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) mmc_set_driver_type() argument 1600 mmc_host_clk_hold(host); mmc_set_driver_type() 1601 host->ios.drv_type = drv_type; mmc_set_driver_type() 1602 mmc_set_ios(host); mmc_set_driver_type() 1603 mmc_host_clk_release(host); mmc_set_driver_type() 1614 * If a host does all the power sequencing itself, ignore the 1617 void mmc_power_up(struct mmc_host *host, u32 ocr) mmc_power_up() argument 1619 if (host->ios.power_mode == MMC_POWER_ON) mmc_power_up() 1622 mmc_host_clk_hold(host); mmc_power_up() 1624 mmc_pwrseq_pre_power_on(host); mmc_power_up() 1626 host->ios.vdd = fls(ocr) - 1; mmc_power_up() 1627 host->ios.power_mode = MMC_POWER_UP; mmc_power_up() 1629 mmc_set_initial_state(host); mmc_power_up() 1632 if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0) mmc_power_up() 1633 dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n"); mmc_power_up() 1634 else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0) mmc_power_up() 1635 dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n"); mmc_power_up() 1636 else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0) mmc_power_up() 1637 dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n"); mmc_power_up() 1645 mmc_pwrseq_post_power_on(host); mmc_power_up() 1647 host->ios.clock = host->f_init; mmc_power_up() 1649 host->ios.power_mode = MMC_POWER_ON; mmc_power_up() 1650 mmc_set_ios(host); mmc_power_up() 1658 mmc_host_clk_release(host); mmc_power_up() 1661 void mmc_power_off(struct mmc_host *host) mmc_power_off() argument 1663 if (host->ios.power_mode == MMC_POWER_OFF) mmc_power_off() 1666 mmc_host_clk_hold(host); mmc_power_off() 1668 mmc_pwrseq_power_off(host); mmc_power_off() 1670 host->ios.clock = 0; mmc_power_off() 1671 host->ios.vdd = 0; mmc_power_off() 1673 host->ios.power_mode = MMC_POWER_OFF; mmc_power_off() 1675 mmc_set_initial_state(host); mmc_power_off() 1684 mmc_host_clk_release(host); mmc_power_off() 1687 void mmc_power_cycle(struct mmc_host *host, u32 ocr) mmc_power_cycle() argument 1689 mmc_power_off(host); mmc_power_cycle() 1692 mmc_power_up(host, ocr); mmc_power_cycle() 1698 static void __mmc_release_bus(struct mmc_host *host) __mmc_release_bus() argument 1700 BUG_ON(!host); __mmc_release_bus() 1701 BUG_ON(host->bus_refs); __mmc_release_bus() 1702 BUG_ON(!host->bus_dead); __mmc_release_bus() 1704 host->bus_ops = NULL; __mmc_release_bus() 1710 static inline void mmc_bus_get(struct mmc_host *host) mmc_bus_get() argument 1714 spin_lock_irqsave(&host->lock, flags); mmc_bus_get() 1715 host->bus_refs++; mmc_bus_get() 1716 spin_unlock_irqrestore(&host->lock, flags); mmc_bus_get() 1723 static inline void mmc_bus_put(struct mmc_host *host) mmc_bus_put() argument 1727 spin_lock_irqsave(&host->lock, flags); 
mmc_bus_put() 1728 host->bus_refs--; mmc_bus_put() 1729 if ((host->bus_refs == 0) && host->bus_ops) mmc_bus_put() 1730 __mmc_release_bus(host); mmc_bus_put() 1731 spin_unlock_irqrestore(&host->lock, flags); mmc_bus_put() 1735 * Assign a mmc bus handler to a host. Only one bus handler may control a 1736 * host at any given time. 1738 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops) mmc_attach_bus() argument 1742 BUG_ON(!host); mmc_attach_bus() 1745 WARN_ON(!host->claimed); mmc_attach_bus() 1747 spin_lock_irqsave(&host->lock, flags); mmc_attach_bus() 1749 BUG_ON(host->bus_ops); mmc_attach_bus() 1750 BUG_ON(host->bus_refs); mmc_attach_bus() 1752 host->bus_ops = ops; mmc_attach_bus() 1753 host->bus_refs = 1; mmc_attach_bus() 1754 host->bus_dead = 0; mmc_attach_bus() 1756 spin_unlock_irqrestore(&host->lock, flags); mmc_attach_bus() 1760 * Remove the current bus handler from a host. 1762 void mmc_detach_bus(struct mmc_host *host) mmc_detach_bus() argument 1766 BUG_ON(!host); mmc_detach_bus() 1768 WARN_ON(!host->claimed); mmc_detach_bus() 1769 WARN_ON(!host->bus_ops); mmc_detach_bus() 1771 spin_lock_irqsave(&host->lock, flags); mmc_detach_bus() 1773 host->bus_dead = 1; mmc_detach_bus() 1775 spin_unlock_irqrestore(&host->lock, flags); mmc_detach_bus() 1777 mmc_bus_put(host); mmc_detach_bus() 1780 static void _mmc_detect_change(struct mmc_host *host, unsigned long delay, _mmc_detect_change() argument 1785 spin_lock_irqsave(&host->lock, flags); _mmc_detect_change() 1786 WARN_ON(host->removed); _mmc_detect_change() 1787 spin_unlock_irqrestore(&host->lock, flags); _mmc_detect_change() 1794 if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) && _mmc_detect_change() 1795 device_can_wakeup(mmc_dev(host))) _mmc_detect_change() 1796 pm_wakeup_event(mmc_dev(host), 5000); _mmc_detect_change() 1798 host->detect_change = 1; _mmc_detect_change() 1799 mmc_schedule_delayed_work(&host->detect, delay); _mmc_detect_change() 1804 * @host: host which changed state. 1812 void mmc_detect_change(struct mmc_host *host, unsigned long delay) mmc_detect_change() argument 1814 _mmc_detect_change(host, delay, true); mmc_detect_change() 1900 (mmc_host_clk_rate(card->host) / 1000); mmc_mmc_erase_timeout() 1926 if (mmc_host_is_spi(card->host) && erase_timeout < 1000) mmc_mmc_erase_timeout() 2011 err = mmc_wait_for_cmd(card->host, &cmd, 0); mmc_do_erase() 2026 err = mmc_wait_for_cmd(card->host, &cmd, 0); mmc_do_erase() 2039 err = mmc_wait_for_cmd(card->host, &cmd, 0); mmc_do_erase() 2047 if (mmc_host_is_spi(card->host)) mmc_do_erase() 2057 err = mmc_wait_for_cmd(card->host, &cmd, 0); mmc_do_erase() 2070 mmc_hostname(card->host), __func__); mmc_do_erase() 2088 * Caller must claim host before calling this function. 
2095 if (!(card->host->caps & MMC_CAP_ERASE) || mmc_erase() 2150 if ((card->host->caps & MMC_CAP_ERASE) && mmc_can_erase() 2210 struct mmc_host *host = card->host; mmc_do_calc_max_discard() local 2226 if (timeout > host->max_busy_timeout) mmc_do_calc_max_discard() 2255 struct mmc_host *host = card->host; mmc_calc_max_discard() local 2258 if (!host->max_busy_timeout) mmc_calc_max_discard() 2278 mmc_hostname(host), max_discard, host->max_busy_timeout); mmc_calc_max_discard() 2293 return mmc_wait_for_cmd(card->host, &cmd, 5); mmc_set_blocklen() 2307 return mmc_wait_for_cmd(card->host, &cmd, 5); mmc_set_blockcount() 2311 static void mmc_hw_reset_for_init(struct mmc_host *host) mmc_hw_reset_for_init() argument 2313 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) mmc_hw_reset_for_init() 2315 mmc_host_clk_hold(host); mmc_hw_reset_for_init() 2316 host->ops->hw_reset(host); mmc_hw_reset_for_init() 2317 mmc_host_clk_release(host); mmc_hw_reset_for_init() 2320 int mmc_hw_reset(struct mmc_host *host) mmc_hw_reset() argument 2324 if (!host->card) mmc_hw_reset() 2327 mmc_bus_get(host); mmc_hw_reset() 2328 if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) { mmc_hw_reset() 2329 mmc_bus_put(host); mmc_hw_reset() 2333 ret = host->bus_ops->reset(host); mmc_hw_reset() 2334 mmc_bus_put(host); mmc_hw_reset() 2336 pr_warn("%s: tried to reset card\n", mmc_hostname(host)); mmc_hw_reset() 2342 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) mmc_rescan_try_freq() argument 2344 host->f_init = freq; mmc_rescan_try_freq() 2348 mmc_hostname(host), __func__, host->f_init); mmc_rescan_try_freq() 2350 mmc_power_up(host, host->ocr_avail); mmc_rescan_try_freq() 2356 mmc_hw_reset_for_init(host); mmc_rescan_try_freq() 2363 sdio_reset(host); mmc_rescan_try_freq() 2364 mmc_go_idle(host); mmc_rescan_try_freq() 2366 mmc_send_if_cond(host, host->ocr_avail); mmc_rescan_try_freq() 2369 if (!mmc_attach_sdio(host)) mmc_rescan_try_freq() 2371 if (!mmc_attach_sd(host)) mmc_rescan_try_freq() 2373 if (!mmc_attach_mmc(host)) mmc_rescan_try_freq() 2376 mmc_power_off(host); mmc_rescan_try_freq() 2380 int _mmc_detect_card_removed(struct mmc_host *host) _mmc_detect_card_removed() argument 2384 if (host->caps & MMC_CAP_NONREMOVABLE) _mmc_detect_card_removed() 2387 if (!host->card || mmc_card_removed(host->card)) _mmc_detect_card_removed() 2390 ret = host->bus_ops->alive(host); _mmc_detect_card_removed() 2399 if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) { _mmc_detect_card_removed() 2400 mmc_detect_change(host, msecs_to_jiffies(200)); _mmc_detect_card_removed() 2401 pr_debug("%s: card removed too slowly\n", mmc_hostname(host)); _mmc_detect_card_removed() 2405 mmc_card_set_removed(host->card); _mmc_detect_card_removed() 2406 pr_debug("%s: card remove detected\n", mmc_hostname(host)); _mmc_detect_card_removed() 2412 int mmc_detect_card_removed(struct mmc_host *host) mmc_detect_card_removed() argument 2414 struct mmc_card *card = host->card; mmc_detect_card_removed() 2417 WARN_ON(!host->claimed); mmc_detect_card_removed() 2425 * detect a change or host requires polling to provide card detection. 
mmc_detect_card_removed() 2427 if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL)) mmc_detect_card_removed() 2430 host->detect_change = 0; mmc_detect_card_removed() 2432 ret = _mmc_detect_card_removed(host); mmc_detect_card_removed() 2433 if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) { mmc_detect_card_removed() 2438 cancel_delayed_work(&host->detect); mmc_detect_card_removed() 2439 _mmc_detect_change(host, 0, false); mmc_detect_card_removed() 2449 struct mmc_host *host = mmc_rescan() local 2453 if (host->trigger_card_event && host->ops->card_event) { mmc_rescan() 2454 host->ops->card_event(host); mmc_rescan() 2455 host->trigger_card_event = false; mmc_rescan() 2458 if (host->rescan_disable) mmc_rescan() 2462 if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered) mmc_rescan() 2464 host->rescan_entered = 1; mmc_rescan() 2466 mmc_bus_get(host); mmc_rescan() 2472 if (host->bus_ops && !host->bus_dead mmc_rescan() 2473 && !(host->caps & MMC_CAP_NONREMOVABLE)) mmc_rescan() 2474 host->bus_ops->detect(host); mmc_rescan() 2476 host->detect_change = 0; mmc_rescan() 2482 mmc_bus_put(host); mmc_rescan() 2483 mmc_bus_get(host); mmc_rescan() 2486 if (host->bus_ops != NULL) { mmc_rescan() 2487 mmc_bus_put(host); mmc_rescan() 2495 mmc_bus_put(host); mmc_rescan() 2497 if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd && mmc_rescan() 2498 host->ops->get_cd(host) == 0) { mmc_rescan() 2499 mmc_claim_host(host); mmc_rescan() 2500 mmc_power_off(host); mmc_rescan() 2501 mmc_release_host(host); mmc_rescan() 2505 mmc_claim_host(host); mmc_rescan() 2507 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) mmc_rescan() 2509 if (freqs[i] <= host->f_min) mmc_rescan() 2512 mmc_release_host(host); mmc_rescan() 2515 if (host->caps & MMC_CAP_NEEDS_POLL) mmc_rescan() 2516 mmc_schedule_delayed_work(&host->detect, HZ); mmc_rescan() 2519 void mmc_start_host(struct mmc_host *host) mmc_start_host() argument 2521 host->f_init = max(freqs[0], host->f_min); mmc_start_host() 2522 host->rescan_disable = 0; mmc_start_host() 2523 host->ios.power_mode = MMC_POWER_UNDEFINED; mmc_start_host() 2524 if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP) mmc_start_host() 2525 mmc_power_off(host); mmc_start_host() 2527 mmc_power_up(host, host->ocr_avail); mmc_start_host() 2528 mmc_gpiod_request_cd_irq(host); mmc_start_host() 2529 _mmc_detect_change(host, 0, false); mmc_start_host() 2532 void mmc_stop_host(struct mmc_host *host) mmc_stop_host() argument 2536 spin_lock_irqsave(&host->lock, flags); mmc_stop_host() 2537 host->removed = 1; mmc_stop_host() 2538 spin_unlock_irqrestore(&host->lock, flags); mmc_stop_host() 2540 if (host->slot.cd_irq >= 0) mmc_stop_host() 2541 disable_irq(host->slot.cd_irq); mmc_stop_host() 2543 host->rescan_disable = 1; mmc_stop_host() 2544 cancel_delayed_work_sync(&host->detect); mmc_stop_host() 2548 host->pm_flags = 0; mmc_stop_host() 2550 mmc_bus_get(host); mmc_stop_host() 2551 if (host->bus_ops && !host->bus_dead) { mmc_stop_host() 2552 /* Calling bus_ops->remove() with a claimed host can deadlock */ mmc_stop_host() 2553 host->bus_ops->remove(host); mmc_stop_host() 2554 mmc_claim_host(host); mmc_stop_host() 2555 mmc_detach_bus(host); mmc_stop_host() 2556 mmc_power_off(host); mmc_stop_host() 2557 mmc_release_host(host); mmc_stop_host() 2558 mmc_bus_put(host); mmc_stop_host() 2561 mmc_bus_put(host); mmc_stop_host() 2563 BUG_ON(host->card); mmc_stop_host() 2565 mmc_power_off(host); mmc_stop_host() 2568 int mmc_power_save_host(struct mmc_host *host) mmc_power_save_host() argument 2573 
pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__); mmc_power_save_host() 2576 mmc_bus_get(host); mmc_power_save_host() 2578 if (!host->bus_ops || host->bus_dead) { mmc_power_save_host() 2579 mmc_bus_put(host); mmc_power_save_host() 2583 if (host->bus_ops->power_save) mmc_power_save_host() 2584 ret = host->bus_ops->power_save(host); mmc_power_save_host() 2586 mmc_bus_put(host); mmc_power_save_host() 2588 mmc_power_off(host); mmc_power_save_host() 2594 int mmc_power_restore_host(struct mmc_host *host) mmc_power_restore_host() argument 2599 pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__); mmc_power_restore_host() 2602 mmc_bus_get(host); mmc_power_restore_host() 2604 if (!host->bus_ops || host->bus_dead) { mmc_power_restore_host() 2605 mmc_bus_put(host); mmc_power_restore_host() 2609 mmc_power_up(host, host->card->ocr); mmc_power_restore_host() 2610 ret = host->bus_ops->power_restore(host); mmc_power_restore_host() 2612 mmc_bus_put(host); mmc_power_restore_host() 2632 mmc_hostname(card->host), err); mmc_flush_cache() 2648 struct mmc_host *host = container_of( mmc_pm_notify() local 2657 spin_lock_irqsave(&host->lock, flags); mmc_pm_notify() 2658 host->rescan_disable = 1; mmc_pm_notify() 2659 spin_unlock_irqrestore(&host->lock, flags); mmc_pm_notify() 2660 cancel_delayed_work_sync(&host->detect); mmc_pm_notify() 2662 if (!host->bus_ops) mmc_pm_notify() 2666 if (host->bus_ops->pre_suspend) mmc_pm_notify() 2667 err = host->bus_ops->pre_suspend(host); mmc_pm_notify() 2671 /* Calling bus_ops->remove() with a claimed host can deadlock */ mmc_pm_notify() 2672 host->bus_ops->remove(host); mmc_pm_notify() 2673 mmc_claim_host(host); mmc_pm_notify() 2674 mmc_detach_bus(host); mmc_pm_notify() 2675 mmc_power_off(host); mmc_pm_notify() 2676 mmc_release_host(host); mmc_pm_notify() 2677 host->pm_flags = 0; mmc_pm_notify() 2684 spin_lock_irqsave(&host->lock, flags); mmc_pm_notify() 2685 host->rescan_disable = 0; mmc_pm_notify() 2686 spin_unlock_irqrestore(&host->lock, flags); mmc_pm_notify() 2687 _mmc_detect_change(host, 0, false); mmc_pm_notify() 2697 * @host: mmc host 2700 * request mechanism, used by mmc core, host driver and mmc requests 2703 void mmc_init_context_info(struct mmc_host *host) mmc_init_context_info() argument 2705 spin_lock_init(&host->context_info.lock); mmc_init_context_info() 2706 host->context_info.is_new_req = false; mmc_init_context_info() 2707 host->context_info.is_done_rcv = false; mmc_init_context_info() 2708 host->context_info.is_waiting_last_req = false; mmc_init_context_info() 2709 init_waitqueue_head(&host->context_info.wait); mmc_init_context_info()
|
H A D | mmc_ops.h | 16 int mmc_deselect_cards(struct mmc_host *host); 17 int mmc_set_dsr(struct mmc_host *host); 18 int mmc_go_idle(struct mmc_host *host); 19 int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr); 20 int mmc_all_send_cid(struct mmc_host *host, u32 *cid); 24 int mmc_send_cid(struct mmc_host *host, u32 *cid); 25 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp); 26 int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
|
H A D | sd.h | 8 int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr); 9 int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card); 11 int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
|
H A D | sd.c | 19 #include <linux/mmc/host.h> 178 mmc_hostname(card->host), csd_struct); mmc_decode_csd() 202 mmc_hostname(card->host), scr_struct); mmc_decode_scr() 233 mmc_hostname(card->host)); mmc_read_ssr() 244 mmc_hostname(card->host)); mmc_read_ssr() 269 mmc_hostname(card->host)); mmc_read_ssr() 290 mmc_hostname(card->host)); mmc_read_switch() 300 mmc_hostname(card->host)); mmc_read_switch() 312 * If the host or the card can't do the switch, mmc_read_switch() 319 mmc_hostname(card->host)); mmc_read_switch() 354 if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED)) mmc_sd_switch_hs() 365 "switch capabilities.\n", mmc_hostname(card->host)); mmc_sd_switch_hs() 375 mmc_hostname(card->host)); mmc_sd_switch_hs() 395 * If the host doesn't support any of the Driver Types A,C or D, sd_select_driver_type() 399 if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C sd_select_driver_type() 403 if (!card->host->ops->select_drive_strength) sd_select_driver_type() 406 if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) sd_select_driver_type() 409 if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) sd_select_driver_type() 412 if (card->host->caps & MMC_CAP_DRIVER_TYPE_D) sd_select_driver_type() 430 mmc_host_clk_hold(card->host); sd_select_driver_type() 431 drive_strength = card->host->ops->select_drive_strength( sd_select_driver_type() 434 mmc_host_clk_release(card->host); sd_select_driver_type() 442 mmc_hostname(card->host)); sd_select_driver_type() 446 mmc_set_driver_type(card->host, drive_strength); sd_select_driver_type() 454 * If the host doesn't support any of the UHS-I modes, fallback on sd_update_bus_speed_mode() 457 if (!mmc_host_uhs(card->host)) { sd_update_bus_speed_mode() 462 if ((card->host->caps & MMC_CAP_UHS_SDR104) && sd_update_bus_speed_mode() 465 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) && sd_update_bus_speed_mode() 468 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | sd_update_bus_speed_mode() 472 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | sd_update_bus_speed_mode() 476 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | sd_update_bus_speed_mode() 520 mmc_hostname(card->host)); sd_set_bus_speed_mode() 522 mmc_set_timing(card->host, timing); sd_set_bus_speed_mode() 523 mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr); sd_set_bus_speed_mode() 529 /* Get host's max current setting at its current voltage */ sd_get_host_max_current() 530 static u32 sd_get_host_max_current(struct mmc_host *host) sd_get_host_max_current() argument 534 voltage = 1 << host->ios.vdd; sd_get_host_max_current() 537 max_current = host->max_current_180; sd_get_host_max_current() 541 max_current = host->max_current_300; sd_get_host_max_current() 545 max_current = host->max_current_330; sd_get_host_max_current() 574 max_current = sd_get_host_max_current(card->host); sd_set_current_limit() 577 * We only check host's capability here, if we set a limit that is sd_set_current_limit() 582 * maximum 300ma from the host. 
sd_set_current_limit() 600 mmc_hostname(card->host)); sd_set_current_limit() 624 "switch capabilities.\n", mmc_hostname(card->host)); mmc_sd_init_uhs_card() 629 if ((card->host->caps & MMC_CAP_4_BIT_DATA) && mmc_sd_init_uhs_card() 635 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); mmc_sd_init_uhs_card() 639 * Select the bus speed mode depending on host mmc_sd_init_uhs_card() 663 if (!mmc_host_is_spi(card->host) && mmc_sd_init_uhs_card() 664 (card->host->ios.timing == MMC_TIMING_UHS_SDR50 || mmc_sd_init_uhs_card() 665 card->host->ios.timing == MMC_TIMING_UHS_DDR50 || mmc_sd_init_uhs_card() 666 card->host->ios.timing == MMC_TIMING_UHS_SDR104)) { mmc_sd_init_uhs_card() 676 if (err && card->host->ios.timing == MMC_TIMING_UHS_DDR50) { mmc_sd_init_uhs_card() 678 mmc_hostname(card->host)); mmc_sd_init_uhs_card() 729 int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr) mmc_sd_get_cid() argument 739 pr_warn("%s: Skipping voltage switch\n", mmc_hostname(host)); mmc_sd_get_cid() 748 mmc_go_idle(host); mmc_sd_get_cid() 756 err = mmc_send_if_cond(host, ocr); mmc_sd_get_cid() 761 * If the host supports one of UHS-I modes, request the card mmc_sd_get_cid() 765 if (retries && mmc_host_uhs(host)) mmc_sd_get_cid() 769 * If the host can supply more than 150mA at current voltage, mmc_sd_get_cid() 772 max_current = sd_get_host_max_current(host); mmc_sd_get_cid() 776 err = mmc_send_app_op_cond(host, ocr, rocr); mmc_sd_get_cid() 784 if (!mmc_host_is_spi(host) && rocr && mmc_sd_get_cid() 786 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, mmc_sd_get_cid() 797 if (mmc_host_is_spi(host)) mmc_sd_get_cid() 798 err = mmc_send_cid(host, cid); mmc_sd_get_cid() 800 err = mmc_all_send_cid(host, cid); mmc_sd_get_cid() 805 int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card) mmc_sd_get_csd() argument 823 int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, mmc_sd_setup_card() argument 864 if (mmc_host_is_spi(host)) { mmc_sd_setup_card() 865 err = mmc_spi_set_crc(host, use_spi_crc); mmc_sd_setup_card() 876 if (host->ops->get_ro) { mmc_sd_setup_card() 877 mmc_host_clk_hold(card->host); mmc_sd_setup_card() 878 ro = host->ops->get_ro(host); mmc_sd_setup_card() 879 mmc_host_clk_release(card->host); mmc_sd_setup_card() 883 pr_warn("%s: host does not support reading read-only switch, assuming write-enable\n", mmc_sd_setup_card() 884 mmc_hostname(host)); mmc_sd_setup_card() 913 static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, mmc_sd_init_card() argument 921 BUG_ON(!host); mmc_sd_init_card() 922 WARN_ON(!host->claimed); mmc_sd_init_card() 924 err = mmc_sd_get_cid(host, ocr, cid, &rocr); mmc_sd_init_card() 937 card = mmc_alloc_card(host, &sd_type); mmc_sd_init_card() 949 if (host->ops->init_card) mmc_sd_init_card() 950 host->ops->init_card(host, card); mmc_sd_init_card() 955 if (!mmc_host_is_spi(host)) { mmc_sd_init_card() 956 err = mmc_send_relative_addr(host, &card->rca); mmc_sd_init_card() 962 err = mmc_sd_get_csd(host, card); mmc_sd_init_card() 973 if (card->csd.dsr_imp && host->dsr_req) mmc_sd_init_card() 974 mmc_set_dsr(host); mmc_sd_init_card() 979 if (!mmc_host_is_spi(host)) { mmc_sd_init_card() 985 err = mmc_sd_setup_card(host, card, oldcard != NULL); mmc_sd_init_card() 1000 mmc_set_timing(card->host, MMC_TIMING_SD_HS); mmc_sd_init_card() 1007 mmc_set_clock(host, mmc_sd_get_max_clock(card)); mmc_sd_init_card() 1012 if ((host->caps & MMC_CAP_4_BIT_DATA) && mmc_sd_init_card() 1018 mmc_set_bus_width(host, MMC_BUS_WIDTH_4); mmc_sd_init_card() 1022 host->card = 
card; mmc_sd_init_card() 1035 static void mmc_sd_remove(struct mmc_host *host) mmc_sd_remove() argument 1037 BUG_ON(!host); mmc_sd_remove() 1038 BUG_ON(!host->card); mmc_sd_remove() 1040 mmc_remove_card(host->card); mmc_sd_remove() 1041 host->card = NULL; mmc_sd_remove() 1047 static int mmc_sd_alive(struct mmc_host *host) mmc_sd_alive() argument 1049 return mmc_send_status(host->card, NULL); mmc_sd_alive() 1053 * Card detection callback from host. 1055 static void mmc_sd_detect(struct mmc_host *host) mmc_sd_detect() argument 1059 BUG_ON(!host); mmc_sd_detect() 1060 BUG_ON(!host->card); mmc_sd_detect() 1062 mmc_get_card(host->card); mmc_sd_detect() 1067 err = _mmc_detect_card_removed(host); mmc_sd_detect() 1069 mmc_put_card(host->card); mmc_sd_detect() 1072 mmc_sd_remove(host); mmc_sd_detect() 1074 mmc_claim_host(host); mmc_sd_detect() 1075 mmc_detach_bus(host); mmc_sd_detect() 1076 mmc_power_off(host); mmc_sd_detect() 1077 mmc_release_host(host); mmc_sd_detect() 1081 static int _mmc_sd_suspend(struct mmc_host *host) _mmc_sd_suspend() argument 1085 BUG_ON(!host); _mmc_sd_suspend() 1086 BUG_ON(!host->card); _mmc_sd_suspend() 1088 mmc_claim_host(host); _mmc_sd_suspend() 1090 if (mmc_card_suspended(host->card)) _mmc_sd_suspend() 1093 if (!mmc_host_is_spi(host)) _mmc_sd_suspend() 1094 err = mmc_deselect_cards(host); _mmc_sd_suspend() 1097 mmc_power_off(host); _mmc_sd_suspend() 1098 mmc_card_set_suspended(host->card); _mmc_sd_suspend() 1102 mmc_release_host(host); _mmc_sd_suspend() 1109 static int mmc_sd_suspend(struct mmc_host *host) mmc_sd_suspend() argument 1113 err = _mmc_sd_suspend(host); mmc_sd_suspend() 1115 pm_runtime_disable(&host->card->dev); mmc_sd_suspend() 1116 pm_runtime_set_suspended(&host->card->dev); mmc_sd_suspend() 1126 static int _mmc_sd_resume(struct mmc_host *host) _mmc_sd_resume() argument 1130 BUG_ON(!host); _mmc_sd_resume() 1131 BUG_ON(!host->card); _mmc_sd_resume() 1133 mmc_claim_host(host); _mmc_sd_resume() 1135 if (!mmc_card_suspended(host->card)) _mmc_sd_resume() 1138 mmc_power_up(host, host->card->ocr); _mmc_sd_resume() 1139 err = mmc_sd_init_card(host, host->card->ocr, host->card); _mmc_sd_resume() 1140 mmc_card_clr_suspended(host->card); _mmc_sd_resume() 1143 mmc_release_host(host); _mmc_sd_resume() 1150 static int mmc_sd_resume(struct mmc_host *host) mmc_sd_resume() argument 1154 if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) { mmc_sd_resume() 1155 err = _mmc_sd_resume(host); mmc_sd_resume() 1156 pm_runtime_set_active(&host->card->dev); mmc_sd_resume() 1157 pm_runtime_mark_last_busy(&host->card->dev); mmc_sd_resume() 1159 pm_runtime_enable(&host->card->dev); mmc_sd_resume() 1167 static int mmc_sd_runtime_suspend(struct mmc_host *host) mmc_sd_runtime_suspend() argument 1171 if (!(host->caps & MMC_CAP_AGGRESSIVE_PM)) mmc_sd_runtime_suspend() 1174 err = _mmc_sd_suspend(host); mmc_sd_runtime_suspend() 1177 mmc_hostname(host), err); mmc_sd_runtime_suspend() 1185 static int mmc_sd_runtime_resume(struct mmc_host *host) mmc_sd_runtime_resume() argument 1189 if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME))) mmc_sd_runtime_resume() 1192 err = _mmc_sd_resume(host); mmc_sd_runtime_resume() 1195 mmc_hostname(host), err); mmc_sd_runtime_resume() 1200 static int mmc_sd_power_restore(struct mmc_host *host) mmc_sd_power_restore() argument 1204 mmc_claim_host(host); mmc_sd_power_restore() 1205 ret = mmc_sd_init_card(host, host->card->ocr, host->card); mmc_sd_power_restore() 1206 mmc_release_host(host); mmc_sd_power_restore() 1211 static int mmc_sd_reset(struct 
mmc_host *host) mmc_sd_reset() argument 1213 mmc_power_cycle(host, host->card->ocr); mmc_sd_reset() 1214 return mmc_sd_power_restore(host); mmc_sd_reset() 1233 int mmc_attach_sd(struct mmc_host *host) mmc_attach_sd() argument 1238 BUG_ON(!host); mmc_attach_sd() 1239 WARN_ON(!host->claimed); mmc_attach_sd() 1241 err = mmc_send_app_op_cond(host, 0, &ocr); mmc_attach_sd() 1245 mmc_attach_bus(host, &mmc_sd_ops); mmc_attach_sd() 1246 if (host->ocr_avail_sd) mmc_attach_sd() 1247 host->ocr_avail = host->ocr_avail_sd; mmc_attach_sd() 1252 if (mmc_host_is_spi(host)) { mmc_attach_sd() 1253 mmc_go_idle(host); mmc_attach_sd() 1255 err = mmc_spi_read_ocr(host, 0, &ocr); mmc_attach_sd() 1260 rocr = mmc_select_voltage(host, ocr); mmc_attach_sd() 1273 err = mmc_sd_init_card(host, rocr, NULL); mmc_attach_sd() 1277 mmc_release_host(host); mmc_attach_sd() 1278 err = mmc_add_card(host->card); mmc_attach_sd() 1279 mmc_claim_host(host); mmc_attach_sd() 1286 mmc_release_host(host); mmc_attach_sd() 1287 mmc_remove_card(host->card); mmc_attach_sd() 1288 host->card = NULL; mmc_attach_sd() 1289 mmc_claim_host(host); mmc_attach_sd() 1291 mmc_detach_bus(host); mmc_attach_sd() 1294 mmc_hostname(host), err); mmc_attach_sd()
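sd.c only takes the UHS-I paths above (sd_update_bus_speed_mode(), mmc_sd_init_uhs_card(), the signal-voltage switch in mmc_sd_get_cid()) when the host advertises them. A hedged sketch of the host-side capabilities a driver would typically set for that to happen; the values are illustrative, "my_host_advertise_uhs" is a hypothetical probe-time helper, and the driver must also implement .start_signal_voltage_switch (and ideally .card_busy) in its mmc_host_ops, since mmc_set_signal_voltage() checks for them.

    #include <linux/mmc/host.h>

    static void my_host_advertise_uhs(struct mmc_host *mmc)
    {
            mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED |
                         MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
                         MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
                         MMC_CAP_UHS_DDR50;

            /* consumed by sd_get_host_max_current() when deciding whether the
             * card may run SDR50/SDR104 and which current limit to switch to
             * (values in mA, illustrative) */
            mmc->max_current_330 = 400;
            mmc->max_current_180 = 300;
    }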
|
H A D | sdio.c | 15 #include <linux/mmc/host.h> 95 * the host lock as we haven't registered the device yet. sdio_init_func() 119 mmc_hostname(card->host), cccr_vsn); sdio_read_cccr() 160 if (mmc_host_uhs(card->host)) { sdio_read_cccr() 208 if (!(card->host->caps & MMC_CAP_4_BIT_DATA)) sdio_enable_wide() 220 mmc_hostname(card->host), ctrl); sdio_enable_wide() 265 if (!(card->host->caps & MMC_CAP_4_BIT_DATA)) sdio_disable_wide() 285 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_1); sdio_disable_wide() 297 else if ((card->host->caps & MMC_CAP_4_BIT_DATA) && sdio_enable_4bit_bus() 309 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); sdio_enable_4bit_bus() 325 if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED)) mmc_sdio_switch_hs() 412 * If the host doesn't support any of the Driver Types A,C or D, sdio_select_driver_type() 416 if (!(card->host->caps & sdio_select_driver_type() 422 if (!card->host->ops->select_drive_strength) sdio_select_driver_type() 425 if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) sdio_select_driver_type() 428 if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) sdio_select_driver_type() 431 if (card->host->caps & MMC_CAP_DRIVER_TYPE_D) sdio_select_driver_type() 449 drive_strength = card->host->ops->select_drive_strength( sdio_select_driver_type() 467 mmc_set_driver_type(card->host, drive_strength); sdio_select_driver_type() 478 * If the host doesn't support any of the UHS-I modes, fallback on sdio_set_bus_speed_mode() 481 if (!mmc_host_uhs(card->host)) sdio_set_bus_speed_mode() 486 if ((card->host->caps & MMC_CAP_UHS_SDR104) && sdio_set_bus_speed_mode() 492 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) && sdio_set_bus_speed_mode() 498 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | sdio_set_bus_speed_mode() 505 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | sdio_set_bus_speed_mode() 512 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | sdio_set_bus_speed_mode() 533 mmc_set_timing(card->host, timing); sdio_set_bus_speed_mode() 534 mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr); sdio_set_bus_speed_mode() 553 if (card->host->caps & MMC_CAP_4_BIT_DATA) mmc_sdio_init_uhs_card() 568 if (!mmc_host_is_spi(card->host) && mmc_sdio_init_uhs_card() 569 ((card->host->ios.timing == MMC_TIMING_UHS_SDR50) || mmc_sdio_init_uhs_card() 570 (card->host->ios.timing == MMC_TIMING_UHS_SDR104))) mmc_sdio_init_uhs_card() 582 static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, mmc_sdio_init_card() argument 591 BUG_ON(!host); mmc_sdio_init_card() 592 WARN_ON(!host->claimed); mmc_sdio_init_card() 595 if (mmc_host_uhs(host)) mmc_sdio_init_card() 600 pr_warn("%s: Skipping voltage switch\n", mmc_hostname(host)); mmc_sdio_init_card() 608 err = mmc_send_io_op_cond(host, ocr, &rocr); mmc_sdio_init_card() 616 if (mmc_host_is_spi(host)) { mmc_sdio_init_card() 617 err = mmc_spi_set_crc(host, use_spi_crc); mmc_sdio_init_card() 625 card = mmc_alloc_card(host, NULL); mmc_sdio_init_card() 632 mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) { mmc_sdio_init_card() 652 if (host->ops->init_card) mmc_sdio_init_card() 653 host->ops->init_card(host, card); mmc_sdio_init_card() 656 * If the host and card support UHS-I mode request the card mmc_sdio_init_card() 663 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, mmc_sdio_init_card() 666 sdio_reset(host); mmc_sdio_init_card() 667 mmc_go_idle(host); mmc_sdio_init_card() 668 mmc_send_if_cond(host, host->ocr_avail); mmc_sdio_init_card() 683 if (!powered_resume && !mmc_host_is_spi(host)) { mmc_sdio_init_card() 684 err = 
mmc_send_relative_addr(host, &card->rca); mmc_sdio_init_card() 701 err = mmc_sd_get_csd(host, card); mmc_sdio_init_card() 711 if (!powered_resume && !mmc_host_is_spi(host)) { mmc_sdio_init_card() 721 * It's host's responsibility to fill cccr and cis mmc_sdio_init_card() 724 mmc_set_clock(host, card->cis.max_dtr); mmc_sdio_init_card() 727 mmc_set_timing(card->host, MMC_TIMING_SD_HS); mmc_sdio_init_card() 760 err = mmc_sd_setup_card(host, card, oldcard != NULL); mmc_sdio_init_card() 763 mmc_go_idle(host); mmc_sdio_init_card() 764 if (mmc_host_is_spi(host)) mmc_sdio_init_card() 766 mmc_spi_set_crc(host, use_spi_crc); mmc_sdio_init_card() 791 mmc_set_timing(card->host, MMC_TIMING_SD_HS); mmc_sdio_init_card() 798 mmc_set_clock(host, mmc_sdio_get_max_clock(card)); mmc_sdio_init_card() 809 host->card = card; mmc_sdio_init_card() 823 static void mmc_sdio_remove(struct mmc_host *host) mmc_sdio_remove() argument 827 BUG_ON(!host); mmc_sdio_remove() 828 BUG_ON(!host->card); mmc_sdio_remove() 830 for (i = 0;i < host->card->sdio_funcs;i++) { mmc_sdio_remove() 831 if (host->card->sdio_func[i]) { mmc_sdio_remove() 832 sdio_remove_func(host->card->sdio_func[i]); mmc_sdio_remove() 833 host->card->sdio_func[i] = NULL; mmc_sdio_remove() 837 mmc_remove_card(host->card); mmc_sdio_remove() 838 host->card = NULL; mmc_sdio_remove() 844 static int mmc_sdio_alive(struct mmc_host *host) mmc_sdio_alive() argument 846 return mmc_select_card(host->card); mmc_sdio_alive() 850 * Card detection callback from host. 852 static void mmc_sdio_detect(struct mmc_host *host) mmc_sdio_detect() argument 856 BUG_ON(!host); mmc_sdio_detect() 857 BUG_ON(!host->card); mmc_sdio_detect() 860 if (host->caps & MMC_CAP_POWER_OFF_CARD) { mmc_sdio_detect() 861 err = pm_runtime_get_sync(&host->card->dev); mmc_sdio_detect() 863 pm_runtime_put_noidle(&host->card->dev); mmc_sdio_detect() 868 mmc_claim_host(host); mmc_sdio_detect() 873 err = _mmc_detect_card_removed(host); mmc_sdio_detect() 875 mmc_release_host(host); mmc_sdio_detect() 888 if (host->caps & MMC_CAP_POWER_OFF_CARD) mmc_sdio_detect() 889 pm_runtime_put_sync(&host->card->dev); mmc_sdio_detect() 893 mmc_sdio_remove(host); mmc_sdio_detect() 895 mmc_claim_host(host); mmc_sdio_detect() 896 mmc_detach_bus(host); mmc_sdio_detect() 897 mmc_power_off(host); mmc_sdio_detect() 898 mmc_release_host(host); mmc_sdio_detect() 907 static int mmc_sdio_pre_suspend(struct mmc_host *host) mmc_sdio_pre_suspend() argument 911 for (i = 0; i < host->card->sdio_funcs; i++) { mmc_sdio_pre_suspend() 912 struct sdio_func *func = host->card->sdio_func[i]; mmc_sdio_pre_suspend() 929 static int mmc_sdio_suspend(struct mmc_host *host) mmc_sdio_suspend() argument 931 if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { mmc_sdio_suspend() 932 mmc_claim_host(host); mmc_sdio_suspend() 933 sdio_disable_wide(host->card); mmc_sdio_suspend() 934 mmc_release_host(host); mmc_sdio_suspend() 937 if (!mmc_card_keep_power(host)) mmc_sdio_suspend() 938 mmc_power_off(host); mmc_sdio_suspend() 943 static int mmc_sdio_resume(struct mmc_host *host) mmc_sdio_resume() argument 947 BUG_ON(!host); mmc_sdio_resume() 948 BUG_ON(!host->card); mmc_sdio_resume() 951 mmc_claim_host(host); mmc_sdio_resume() 954 if (!mmc_card_keep_power(host)) { mmc_sdio_resume() 955 mmc_power_up(host, host->card->ocr); mmc_sdio_resume() 962 if (host->caps & MMC_CAP_POWER_OFF_CARD) { mmc_sdio_resume() 963 pm_runtime_disable(&host->card->dev); mmc_sdio_resume() 964 pm_runtime_set_active(&host->card->dev); mmc_sdio_resume() 965 
pm_runtime_enable(&host->card->dev); mmc_sdio_resume() 970 if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) { mmc_sdio_resume() 971 sdio_reset(host); mmc_sdio_resume() 972 mmc_go_idle(host); mmc_sdio_resume() 973 mmc_send_if_cond(host, host->card->ocr); mmc_sdio_resume() 974 err = mmc_send_io_op_cond(host, 0, NULL); mmc_sdio_resume() 976 err = mmc_sdio_init_card(host, host->card->ocr, mmc_sdio_resume() 977 host->card, mmc_sdio_resume() 978 mmc_card_keep_power(host)); mmc_sdio_resume() 979 } else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { mmc_sdio_resume() 981 err = sdio_enable_4bit_bus(host->card); mmc_sdio_resume() 984 if (!err && host->sdio_irqs) { mmc_sdio_resume() 985 if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) { mmc_sdio_resume() 986 wake_up_process(host->sdio_irq_thread); mmc_sdio_resume() 987 } else if (host->caps & MMC_CAP_SDIO_IRQ) { mmc_sdio_resume() 988 mmc_host_clk_hold(host); mmc_sdio_resume() 989 host->ops->enable_sdio_irq(host, 1); mmc_sdio_resume() 990 mmc_host_clk_release(host); mmc_sdio_resume() 994 mmc_release_host(host); mmc_sdio_resume() 996 host->pm_flags &= ~MMC_PM_KEEP_POWER; mmc_sdio_resume() 1000 static int mmc_sdio_power_restore(struct mmc_host *host) mmc_sdio_power_restore() argument 1004 BUG_ON(!host); mmc_sdio_power_restore() 1005 BUG_ON(!host->card); mmc_sdio_power_restore() 1007 mmc_claim_host(host); mmc_sdio_power_restore() 1026 sdio_reset(host); mmc_sdio_power_restore() 1027 mmc_go_idle(host); mmc_sdio_power_restore() 1028 mmc_send_if_cond(host, host->card->ocr); mmc_sdio_power_restore() 1030 ret = mmc_send_io_op_cond(host, 0, NULL); mmc_sdio_power_restore() 1034 ret = mmc_sdio_init_card(host, host->card->ocr, host->card, mmc_sdio_power_restore() 1035 mmc_card_keep_power(host)); mmc_sdio_power_restore() 1036 if (!ret && host->sdio_irqs) mmc_sdio_power_restore() 1037 mmc_signal_sdio_irq(host); mmc_sdio_power_restore() 1040 mmc_release_host(host); mmc_sdio_power_restore() 1045 static int mmc_sdio_runtime_suspend(struct mmc_host *host) mmc_sdio_runtime_suspend() argument 1048 mmc_power_off(host); mmc_sdio_runtime_suspend() 1052 static int mmc_sdio_runtime_resume(struct mmc_host *host) mmc_sdio_runtime_resume() argument 1055 mmc_power_up(host, host->card->ocr); mmc_sdio_runtime_resume() 1056 return mmc_sdio_power_restore(host); mmc_sdio_runtime_resume() 1075 int mmc_attach_sdio(struct mmc_host *host) mmc_attach_sdio() argument 1081 BUG_ON(!host); mmc_attach_sdio() 1082 WARN_ON(!host->claimed); mmc_attach_sdio() 1084 err = mmc_send_io_op_cond(host, 0, &ocr); mmc_attach_sdio() 1088 mmc_attach_bus(host, &mmc_sdio_ops); mmc_attach_sdio() 1089 if (host->ocr_avail_sdio) mmc_attach_sdio() 1090 host->ocr_avail = host->ocr_avail_sdio; mmc_attach_sdio() 1093 rocr = mmc_select_voltage(host, ocr); mmc_attach_sdio() 1106 err = mmc_sdio_init_card(host, rocr, NULL, 0); mmc_attach_sdio() 1110 card = host->card; mmc_attach_sdio() 1113 * Enable runtime PM only if supported by host+card+board mmc_attach_sdio() 1115 if (host->caps & MMC_CAP_POWER_OFF_CARD) { mmc_attach_sdio() 1140 err = sdio_init_func(host->card, i + 1); mmc_attach_sdio() 1147 if (host->caps & MMC_CAP_POWER_OFF_CARD) mmc_attach_sdio() 1154 mmc_release_host(host); mmc_attach_sdio() 1155 err = mmc_add_card(host->card); mmc_attach_sdio() 1163 err = sdio_add_func(host->card->sdio_func[i]); mmc_attach_sdio() 1168 mmc_claim_host(host); mmc_attach_sdio() 1174 mmc_sdio_remove(host); mmc_attach_sdio() 1175 mmc_claim_host(host); mmc_attach_sdio() 1178 mmc_release_host(host); 
mmc_attach_sdio() 1179 if (host->card) mmc_attach_sdio() 1180 mmc_sdio_remove(host); mmc_attach_sdio() 1181 mmc_claim_host(host); mmc_attach_sdio() 1183 mmc_detach_bus(host); mmc_attach_sdio() 1186 mmc_hostname(host), err); mmc_attach_sdio()
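The suspend/resume code above branches on mmc_card_keep_power(host): if a function driver asked for the card to stay powered, mmc_sdio_suspend() skips mmc_power_off() and mmc_sdio_resume() only re-enables the wide bus and SDIO IRQ instead of re-initialising the card. A hedged sketch of the function-driver side of that request, wired in via the driver's dev_pm_ops; the "my_wifi" name is hypothetical.

    #include <linux/mmc/sdio_func.h>
    #include <linux/mmc/pm.h>

    /* referenced from SET_SYSTEM_SLEEP_PM_OPS() in the function driver */
    static int my_wifi_suspend(struct device *dev)
    {
            struct sdio_func *func = dev_to_sdio_func(dev);
            mmc_pm_flag_t caps = sdio_get_host_pm_caps(func);
            int err;

            if (!(caps & MMC_PM_KEEP_POWER))
                    return -ENOSYS;         /* host cannot keep VDD up in suspend */

            /* sets host->pm_flags, later checked by mmc_card_keep_power() above */
            err = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
            if (!err && (caps & MMC_PM_WAKE_SDIO_IRQ))
                    err = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);

            return err;
    }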
|
H A D | slot-gpio.c | 16 #include <linux/mmc/host.h> 36 struct mmc_host *host = dev_id; mmc_gpio_cd_irqt() local 38 host->trigger_card_event = true; mmc_gpio_cd_irqt() 39 mmc_detect_change(host, msecs_to_jiffies(200)); mmc_gpio_cd_irqt() 44 int mmc_gpio_alloc(struct mmc_host *host) mmc_gpio_alloc() argument 46 size_t len = strlen(dev_name(host->parent)) + 4; mmc_gpio_alloc() 47 struct mmc_gpio *ctx = devm_kzalloc(host->parent, mmc_gpio_alloc() 52 snprintf(ctx->cd_label, len, "%s cd", dev_name(host->parent)); mmc_gpio_alloc() 53 snprintf(ctx->ro_label, len, "%s ro", dev_name(host->parent)); mmc_gpio_alloc() 54 host->slot.handler_priv = ctx; mmc_gpio_alloc() 55 host->slot.cd_irq = -EINVAL; mmc_gpio_alloc() 61 int mmc_gpio_get_ro(struct mmc_host *host) mmc_gpio_get_ro() argument 63 struct mmc_gpio *ctx = host->slot.handler_priv; mmc_gpio_get_ro() 70 !!(host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); mmc_gpio_get_ro() 76 int mmc_gpio_get_cd(struct mmc_host *host) mmc_gpio_get_cd() argument 78 struct mmc_gpio *ctx = host->slot.handler_priv; mmc_gpio_get_cd() 85 !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH); mmc_gpio_get_cd() 93 * @host: mmc host 101 int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio) mmc_gpio_request_ro() argument 103 struct mmc_gpio *ctx = host->slot.handler_priv; mmc_gpio_request_ro() 109 ret = devm_gpio_request_one(host->parent, gpio, GPIOF_DIR_IN, mmc_gpio_request_ro() 121 void mmc_gpiod_request_cd_irq(struct mmc_host *host) mmc_gpiod_request_cd_irq() argument 123 struct mmc_gpio *ctx = host->slot.handler_priv; mmc_gpiod_request_cd_irq() 126 if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio) mmc_gpiod_request_cd_irq() 136 if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL) mmc_gpiod_request_cd_irq() 142 ret = devm_request_threaded_irq(host->parent, irq, mmc_gpiod_request_cd_irq() 145 ctx->cd_label, host); mmc_gpiod_request_cd_irq() 150 host->slot.cd_irq = irq; mmc_gpiod_request_cd_irq() 153 host->caps |= MMC_CAP_NEEDS_POLL; mmc_gpiod_request_cd_irq() 160 void mmc_gpio_set_cd_isr(struct mmc_host *host, mmc_gpio_set_cd_isr() argument 163 struct mmc_gpio *ctx = host->slot.handler_priv; mmc_gpio_set_cd_isr() 172 * @host: mmc host 185 int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio, mmc_gpio_request_cd() argument 188 struct mmc_gpio *ctx = host->slot.handler_priv; mmc_gpio_request_cd() 191 ret = devm_gpio_request_one(host->parent, gpio, GPIOF_DIR_IN, mmc_gpio_request_cd() 216 * @host: mmc host 230 int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, mmc_gpiod_request_cd() argument 234 struct mmc_gpio *ctx = host->slot.handler_priv; mmc_gpiod_request_cd() 241 desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN); mmc_gpiod_request_cd() 263 * @host: mmc host 276 int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, mmc_gpiod_request_ro() argument 280 struct mmc_gpio *ctx = host->slot.handler_priv; mmc_gpiod_request_ro() 287 desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN); mmc_gpiod_request_ro()
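slot-gpio.c lets a host driver hand card-detect and write-protect lines to the core: the requested CD GPIO is later turned into an interrupt by mmc_gpiod_request_cd_irq() (called from mmc_start_host()), which fires mmc_gpio_cd_irqt() and schedules a rescan, while mmc_gpio_get_cd()/mmc_gpio_get_ro() serve the ->get_cd/->get_ro callbacks. A hedged sketch of the legacy GPIO-number variants being used at probe time; "my_host_setup_slot_gpios" and the GPIO numbers are hypothetical, and newer drivers would normally prefer the gpiod_ descriptor variants shown above.

    #include <linux/mmc/host.h>
    #include <linux/mmc/slot-gpio.h>

    static int my_host_setup_slot_gpios(struct mmc_host *mmc,
                                        unsigned int cd_gpio, unsigned int wp_gpio)
    {
            int err;

            /* debounce in microseconds, handed to gpio_set_debounce(); the core
             * converts this GPIO into a CD interrupt in mmc_gpiod_request_cd_irq() */
            err = mmc_gpio_request_cd(mmc, cd_gpio, 200);
            if (err)
                    return err;

            /* read back through mmc_gpio_get_ro() when ->get_ro is invoked */
            return mmc_gpio_request_ro(mmc, wp_gpio);
    }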
|
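mmc_gpio_get_ro() and mmc_gpio_get_cd() above fold the line polarity into the result by XOR-ing the inverted raw GPIO level with the MMC_CAP2_RO_ACTIVE_HIGH / MMC_CAP2_CD_ACTIVE_HIGH capability bit. A tiny stand-alone illustration of that pattern (plain C, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Raw GPIO level -> logical "write protected" / "card present" state.
     * An active-low line is inverted; declaring it active-high in caps2 flips
     * it back, mirroring the "!gpio_get_value(...) ^ !!(caps2 & ..._ACTIVE_HIGH)"
     * expression in the helpers above.
     */
    static bool gpio_logical_state(int raw_level, bool active_high)
    {
        return (!raw_level) ^ active_high;
    }

    int main(void)
    {
        printf("active-low,  line=0 -> %d\n", gpio_logical_state(0, false)); /* asserted */
        printf("active-high, line=1 -> %d\n", gpio_logical_state(1, true));  /* asserted */
        return 0;
    }

The same helper therefore serves both wirings: boards only have to set the matching caps2 bit instead of open-coding the inversion.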
H A D | mmc.c | 19 #include <linux/mmc/host.h> 109 mmc_hostname(card->host), card->csd.mmca_vsn); mmc_decode_cid() 143 mmc_hostname(card->host), csd->structure); mmc_decode_csd() 183 struct mmc_host *host = card->host; mmc_select_card_type() local 185 u32 caps = host->caps, caps2 = host->caps2; mmc_select_card_type() 284 mmc_hostname(card->host)); mmc_manage_enhanced_area() 314 mmc_hostname(card->host)); mmc_manage_gp_partitions() 351 "version %d\n", mmc_hostname(card->host), mmc_decode_ext_csd() 358 np = mmc_of_find_child_device(card->host, 0); mmc_decode_ext_csd() 422 if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) { mmc_decode_ext_csd() 508 mmc_hostname(card->host)); mmc_decode_ext_csd() 533 if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) { mmc_decode_ext_csd() 606 /* If the host or the card can't do the switch, mmc_read_ext_csd() 619 mmc_hostname(card->host)); mmc_read_ext_csd() 622 mmc_hostname(card->host)); mmc_read_ext_csd() 778 struct mmc_host *host = card->host; __mmc_select_powerclass() local 783 switch (1 << host->ios.vdd) { __mmc_select_powerclass() 785 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR) __mmc_select_powerclass() 787 else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR) __mmc_select_powerclass() 791 else if (host->ios.clock <= MMC_HS200_MAX_DTR) __mmc_select_powerclass() 803 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR) __mmc_select_powerclass() 805 else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR) __mmc_select_powerclass() 809 else if (host->ios.clock <= MMC_HS200_MAX_DTR) __mmc_select_powerclass() 816 mmc_hostname(host)); __mmc_select_powerclass() 840 struct mmc_host *host = card->host; mmc_select_powerclass() local 848 bus_width = host->ios.bus_width; mmc_select_powerclass() 864 mmc_hostname(host), 1 << bus_width, ddr); mmc_select_powerclass() 884 mmc_set_clock(card->host, max_dtr); mmc_set_bus_speed() 902 struct mmc_host *host = card->host; mmc_select_bus_width() local 907 !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) mmc_select_bus_width() 910 idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1; mmc_select_bus_width() 924 * bus width on the host. 
mmc_select_bus_width() 934 mmc_set_bus_width(host, bus_width); mmc_select_bus_width() 941 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) mmc_select_bus_width() 951 mmc_hostname(host), ext_csd_bits[idx]); mmc_select_bus_width() 970 mmc_set_timing(card->host, MMC_TIMING_MMC_HS); mmc_select_hs() 980 struct mmc_host *host = card->host; mmc_select_hs_ddr() local 987 bus_width = host->ios.bus_width; mmc_select_hs_ddr() 1000 mmc_hostname(host), 1 << bus_width); mmc_select_hs_ddr() 1014 * host controller can support this, like some of the SDHCI mmc_select_hs_ddr() 1016 * host controller still needs to use 1.8v vccq for supporting mmc_select_hs_ddr() 1020 * if (host and device can both support 1.2v IO) mmc_select_hs_ddr() 1022 * else if (host and device can both support 1.8v IO) mmc_select_hs_ddr() 1024 * so if host and device can only support 3.3v IO, this is the mmc_select_hs_ddr() 1031 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120); mmc_select_hs_ddr() 1034 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180); mmc_select_hs_ddr() 1038 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330); mmc_select_hs_ddr() 1041 mmc_set_timing(host, MMC_TIMING_MMC_DDR52); mmc_select_hs_ddr() 1048 struct mmc_host *host = card->host; mmc_select_hs400() local 1055 host->ios.bus_width == MMC_BUS_WIDTH_8)) mmc_select_hs400() 1062 mmc_set_timing(card->host, MMC_TIMING_MMC_HS); mmc_select_hs400() 1071 mmc_hostname(host), err); mmc_select_hs400() 1081 mmc_hostname(host), err); mmc_select_hs400() 1091 mmc_hostname(host), err); mmc_select_hs400() 1095 mmc_set_timing(host, MMC_TIMING_MMC_HS400); mmc_select_hs400() 1110 struct mmc_host *host = card->host; mmc_select_hs200() local 1114 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120); mmc_select_hs200() 1117 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180); mmc_select_hs200() 1124 * Set the bus width(4 or 8) with host's support and mmc_select_hs200() 1134 mmc_set_timing(host, MMC_TIMING_MMC_HS200); mmc_select_hs200() 1162 mmc_hostname(card->host)); mmc_select_timing() 1181 struct mmc_host *host = card->host; mmc_hs200_tuning() local 1188 host->ios.bus_width == MMC_BUS_WIDTH_8) mmc_hs200_tuning() 1189 if (host->ops->prepare_hs400_tuning) mmc_hs200_tuning() 1190 host->ops->prepare_hs400_tuning(host, &host->ios); mmc_hs200_tuning() 1201 static int mmc_init_card(struct mmc_host *host, u32 ocr, mmc_init_card() argument 1209 BUG_ON(!host); mmc_init_card() 1210 WARN_ON(!host->claimed); mmc_init_card() 1213 if (!mmc_host_is_spi(host)) mmc_init_card() 1214 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN); mmc_init_card() 1223 mmc_go_idle(host); mmc_init_card() 1226 err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr); mmc_init_card() 1233 if (mmc_host_is_spi(host)) { mmc_init_card() 1234 err = mmc_spi_set_crc(host, use_spi_crc); mmc_init_card() 1242 if (mmc_host_is_spi(host)) mmc_init_card() 1243 err = mmc_send_cid(host, cid); mmc_init_card() 1245 err = mmc_all_send_cid(host, cid); mmc_init_card() 1260 card = mmc_alloc_card(host, &mmc_type); mmc_init_card() 1275 if (host->ops->init_card) mmc_init_card() 1276 host->ops->init_card(host, card); mmc_init_card() 1281 if (!mmc_host_is_spi(host)) { mmc_init_card() 1286 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); mmc_init_card() 1309 if (card->csd.dsr_imp && host->dsr_req) mmc_init_card() 1310 mmc_set_dsr(host); mmc_init_card() 1315 if (!mmc_host_is_spi(host)) { mmc_init_card() 1340 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF mmc_init_card() 1344 (card->ext_csd.rev >= 3 
&& (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) { mmc_init_card() 1444 mmc_hostname(card->host)); mmc_init_card() 1466 mmc_hostname(card->host), err); mmc_init_card() 1480 host->caps2 & MMC_CAP2_PACKED_CMD) { mmc_init_card() 1489 mmc_hostname(card->host)); mmc_init_card() 1498 host->card = card; mmc_init_card() 1514 static int mmc_sleep(struct mmc_host *host) mmc_sleep() argument 1517 struct mmc_card *card = host->card; mmc_sleep() 1521 err = mmc_deselect_cards(host); mmc_sleep() 1530 * If the max_busy_timeout of the host is specified, validate it against mmc_sleep() 1531 * the sleep cmd timeout. A failure means we need to prevent the host mmc_sleep() 1535 if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) { mmc_sleep() 1542 err = mmc_wait_for_cmd(host, &cmd, 0); mmc_sleep() 1547 * If the host does not wait while the card signals busy, then we will mmc_sleep() 1552 if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY)) mmc_sleep() 1579 mmc_hostname(card->host), timeout); mmc_poweroff_notify() 1590 static void mmc_remove(struct mmc_host *host) mmc_remove() argument 1592 BUG_ON(!host); mmc_remove() 1593 BUG_ON(!host->card); mmc_remove() 1595 mmc_remove_card(host->card); mmc_remove() 1596 host->card = NULL; mmc_remove() 1602 static int mmc_alive(struct mmc_host *host) mmc_alive() argument 1604 return mmc_send_status(host->card, NULL); mmc_alive() 1608 * Card detection callback from host. 1610 static void mmc_detect(struct mmc_host *host) mmc_detect() argument 1614 BUG_ON(!host); mmc_detect() 1615 BUG_ON(!host->card); mmc_detect() 1617 mmc_get_card(host->card); mmc_detect() 1622 err = _mmc_detect_card_removed(host); mmc_detect() 1624 mmc_put_card(host->card); mmc_detect() 1627 mmc_remove(host); mmc_detect() 1629 mmc_claim_host(host); mmc_detect() 1630 mmc_detach_bus(host); mmc_detect() 1631 mmc_power_off(host); mmc_detect() 1632 mmc_release_host(host); mmc_detect() 1636 static int _mmc_suspend(struct mmc_host *host, bool is_suspend) _mmc_suspend() argument 1642 BUG_ON(!host); _mmc_suspend() 1643 BUG_ON(!host->card); _mmc_suspend() 1645 mmc_claim_host(host); _mmc_suspend() 1647 if (mmc_card_suspended(host->card)) _mmc_suspend() 1650 if (mmc_card_doing_bkops(host->card)) { _mmc_suspend() 1651 err = mmc_stop_bkops(host->card); _mmc_suspend() 1656 err = mmc_flush_cache(host->card); _mmc_suspend() 1660 if (mmc_can_poweroff_notify(host->card) && _mmc_suspend() 1661 ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend)) _mmc_suspend() 1662 err = mmc_poweroff_notify(host->card, notify_type); _mmc_suspend() 1663 else if (mmc_can_sleep(host->card)) _mmc_suspend() 1664 err = mmc_sleep(host); _mmc_suspend() 1665 else if (!mmc_host_is_spi(host)) _mmc_suspend() 1666 err = mmc_deselect_cards(host); _mmc_suspend() 1669 mmc_power_off(host); _mmc_suspend() 1670 mmc_card_set_suspended(host->card); _mmc_suspend() 1673 mmc_release_host(host); _mmc_suspend() 1680 static int mmc_suspend(struct mmc_host *host) mmc_suspend() argument 1684 err = _mmc_suspend(host, true); mmc_suspend() 1686 pm_runtime_disable(&host->card->dev); mmc_suspend() 1687 pm_runtime_set_suspended(&host->card->dev); mmc_suspend() 1697 static int _mmc_resume(struct mmc_host *host) _mmc_resume() argument 1701 BUG_ON(!host); _mmc_resume() 1702 BUG_ON(!host->card); _mmc_resume() 1704 mmc_claim_host(host); _mmc_resume() 1706 if (!mmc_card_suspended(host->card)) _mmc_resume() 1709 mmc_power_up(host, host->card->ocr); _mmc_resume() 1710 err = mmc_init_card(host, host->card->ocr, host->card); _mmc_resume() 1711 
mmc_card_clr_suspended(host->card); _mmc_resume() 1714 mmc_release_host(host); _mmc_resume() 1721 static int mmc_shutdown(struct mmc_host *host) mmc_shutdown() argument 1729 if (mmc_can_poweroff_notify(host->card) && mmc_shutdown() 1730 !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE)) mmc_shutdown() 1731 err = _mmc_resume(host); mmc_shutdown() 1734 err = _mmc_suspend(host, false); mmc_shutdown() 1742 static int mmc_resume(struct mmc_host *host) mmc_resume() argument 1746 if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) { mmc_resume() 1747 err = _mmc_resume(host); mmc_resume() 1748 pm_runtime_set_active(&host->card->dev); mmc_resume() 1749 pm_runtime_mark_last_busy(&host->card->dev); mmc_resume() 1751 pm_runtime_enable(&host->card->dev); mmc_resume() 1759 static int mmc_runtime_suspend(struct mmc_host *host) mmc_runtime_suspend() argument 1763 if (!(host->caps & MMC_CAP_AGGRESSIVE_PM)) mmc_runtime_suspend() 1766 err = _mmc_suspend(host, true); mmc_runtime_suspend() 1769 mmc_hostname(host), err); mmc_runtime_suspend() 1777 static int mmc_runtime_resume(struct mmc_host *host) mmc_runtime_resume() argument 1781 if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME))) mmc_runtime_resume() 1784 err = _mmc_resume(host); mmc_runtime_resume() 1787 mmc_hostname(host), err); mmc_runtime_resume() 1792 static int mmc_power_restore(struct mmc_host *host) mmc_power_restore() argument 1796 mmc_claim_host(host); mmc_power_restore() 1797 ret = mmc_init_card(host, host->card->ocr, host->card); mmc_power_restore() 1798 mmc_release_host(host); mmc_power_restore() 1814 static int mmc_reset(struct mmc_host *host) mmc_reset() argument 1816 struct mmc_card *card = host->card; mmc_reset() 1819 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) mmc_reset() 1825 mmc_host_clk_hold(host); mmc_reset() 1826 mmc_set_clock(host, host->f_init); mmc_reset() 1828 host->ops->hw_reset(host); mmc_reset() 1832 mmc_host_clk_release(host); mmc_reset() 1837 mmc_set_initial_state(host); mmc_reset() 1838 mmc_host_clk_release(host); mmc_reset() 1840 return mmc_power_restore(host); mmc_reset() 1859 int mmc_attach_mmc(struct mmc_host *host) mmc_attach_mmc() argument 1864 BUG_ON(!host); mmc_attach_mmc() 1865 WARN_ON(!host->claimed); mmc_attach_mmc() 1868 if (!mmc_host_is_spi(host)) mmc_attach_mmc() 1869 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN); mmc_attach_mmc() 1871 err = mmc_send_op_cond(host, 0, &ocr); mmc_attach_mmc() 1875 mmc_attach_bus(host, &mmc_ops); mmc_attach_mmc() 1876 if (host->ocr_avail_mmc) mmc_attach_mmc() 1877 host->ocr_avail = host->ocr_avail_mmc; mmc_attach_mmc() 1882 if (mmc_host_is_spi(host)) { mmc_attach_mmc() 1883 err = mmc_spi_read_ocr(host, 1, &ocr); mmc_attach_mmc() 1888 rocr = mmc_select_voltage(host, ocr); mmc_attach_mmc() 1901 err = mmc_init_card(host, rocr, NULL); mmc_attach_mmc() 1905 mmc_release_host(host); mmc_attach_mmc() 1906 err = mmc_add_card(host->card); mmc_attach_mmc() 1907 mmc_claim_host(host); mmc_attach_mmc() 1914 mmc_release_host(host); mmc_attach_mmc() 1915 mmc_remove_card(host->card); mmc_attach_mmc() 1916 mmc_claim_host(host); mmc_attach_mmc() 1917 host->card = NULL; mmc_attach_mmc() 1919 mmc_detach_bus(host); mmc_attach_mmc() 1922 mmc_hostname(host), err); mmc_attach_mmc()
|
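In mmc_sleep() above, the sleep command's busy timeout is validated against the host's max_busy_timeout: if the controller cannot wait that long in hardware the R1B busy flag is dropped, and the core later delays in software whenever hardware busy detection is unavailable. A compact sketch of that fallback, with struct host_caps as a hypothetical stand-in for the relevant host fields:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the host capability fields involved. */
    struct host_caps {
        unsigned int max_busy_timeout_ms;   /* 0 = no hardware limit        */
        bool         wait_while_busy;       /* like MMC_CAP_WAIT_WHILE_BUSY */
    };

    /*
     * Mirrors the timeout handling in mmc_sleep(): ask the controller to wait
     * in hardware only when it can cover the whole sleep timeout; otherwise
     * send the command without the busy flag and burn the timeout in software.
     */
    static void send_sleep_cmd(const struct host_caps *h, unsigned int timeout_ms)
    {
        bool hw_busy = true;

        if (h->max_busy_timeout_ms && timeout_ms > h->max_busy_timeout_ms)
            hw_busy = false;                    /* drop R1B, use plain R1 */

        printf("CMD5 issued, hardware busy detection: %s\n", hw_busy ? "yes" : "no");

        if (!hw_busy || !h->wait_while_busy)
            printf("delaying %u ms in software instead\n", timeout_ms);
    }

    int main(void)
    {
        struct host_caps h = { .max_busy_timeout_ms = 100, .wait_while_busy = true };
        send_sleep_cmd(&h, 300);
        return 0;
    }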
H A D | bus.c | 23 #include <linux/mmc/host.h> 132 struct mmc_host *host = card->host; mmc_bus_shutdown() local 138 if (host->bus_ops->shutdown) { mmc_bus_shutdown() 139 ret = host->bus_ops->shutdown(host); mmc_bus_shutdown() 142 mmc_hostname(host), ret); mmc_bus_shutdown() 150 struct mmc_host *host = card->host; mmc_bus_suspend() local 157 ret = host->bus_ops->suspend(host); mmc_bus_suspend() 164 struct mmc_host *host = card->host; mmc_bus_resume() local 167 ret = host->bus_ops->resume(host); mmc_bus_resume() 170 mmc_hostname(host), ret); mmc_bus_resume() 181 struct mmc_host *host = card->host; mmc_runtime_suspend() local 183 return host->bus_ops->runtime_suspend(host); mmc_runtime_suspend() 189 struct mmc_host *host = card->host; mmc_runtime_resume() local 191 return host->bus_ops->runtime_resume(host); mmc_runtime_resume() 259 struct mmc_card *mmc_alloc_card(struct mmc_host *host, struct device_type *type) mmc_alloc_card() argument 267 card->host = host; mmc_alloc_card() 271 card->dev.parent = mmc_classdev(host); mmc_alloc_card() 296 dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca); mmc_add_card() 328 if (mmc_host_is_spi(card->host)) { mmc_add_card() 330 mmc_hostname(card->host), mmc_add_card() 336 mmc_hostname(card->host), mmc_add_card() 348 mmc_init_context_info(card->host); mmc_add_card() 350 card->dev.of_node = mmc_of_find_child_device(card->host, 0); mmc_add_card() 372 if (mmc_host_is_spi(card->host)) { mmc_remove_card() 374 mmc_hostname(card->host)); mmc_remove_card() 377 mmc_hostname(card->host), card->rca); mmc_remove_card()
|
H A D | sd_ops.c | 17 #include <linux/mmc/host.h> 25 int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card) mmc_app_cmd() argument 30 BUG_ON(!host); mmc_app_cmd() 31 BUG_ON(card && (card->host != host)); mmc_app_cmd() 43 err = mmc_wait_for_cmd(host, &cmd, 0); mmc_app_cmd() 48 if (!mmc_host_is_spi(host) && !(cmd.resp[0] & R1_APP_CMD)) mmc_app_cmd() 58 * @host: MMC host to start command 68 int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, mmc_wait_for_app_cmd() argument 85 err = mmc_app_cmd(host, card); mmc_wait_for_app_cmd() 88 if (mmc_host_is_spi(host)) { mmc_wait_for_app_cmd() 103 mmc_wait_for_req(host, &mrq); mmc_wait_for_app_cmd() 110 if (mmc_host_is_spi(host)) { mmc_wait_for_app_cmd() 127 BUG_ON(!card->host); mmc_app_set_bus_width() 143 err = mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES); mmc_app_set_bus_width() 150 int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) mmc_send_app_op_cond() argument 155 BUG_ON(!host); mmc_send_app_op_cond() 158 if (mmc_host_is_spi(host)) mmc_send_app_op_cond() 165 err = mmc_wait_for_app_cmd(host, NULL, &cmd, MMC_CMD_RETRIES); mmc_send_app_op_cond() 174 if (mmc_host_is_spi(host)) { mmc_send_app_op_cond() 188 pr_err("%s: card never left busy state\n", mmc_hostname(host)); mmc_send_app_op_cond() 190 if (rocr && !mmc_host_is_spi(host)) mmc_send_app_op_cond() 196 int mmc_send_if_cond(struct mmc_host *host, u32 ocr) mmc_send_if_cond() argument 212 err = mmc_wait_for_cmd(host, &cmd, 0); mmc_send_if_cond() 216 if (mmc_host_is_spi(host)) mmc_send_if_cond() 227 int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca) mmc_send_relative_addr() argument 232 BUG_ON(!host); mmc_send_relative_addr() 239 err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); mmc_send_relative_addr() 258 BUG_ON(!card->host); mmc_app_send_scr() 263 err = mmc_app_cmd(card->host, card); mmc_app_send_scr() 291 mmc_wait_for_req(card->host, &mrq); mmc_app_send_scr() 316 BUG_ON(!card->host); mmc_sd_switch() 342 mmc_wait_for_req(card->host, &mrq); mmc_sd_switch() 361 BUG_ON(!card->host); mmc_app_sd_status() 366 err = mmc_app_cmd(card->host, card); mmc_app_sd_status() 387 mmc_wait_for_req(card->host, &mrq); mmc_app_sd_status()
|
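mmc_wait_for_app_cmd() above prefixes every SD application command with CMD55 (APP_CMD) and retries the whole pair when either step fails. A stand-alone sketch of that retry pattern; send_cmd55() and send_acmd() are placeholders for the real mmc_app_cmd() / mmc_wait_for_req() calls:

    #include <stdio.h>

    /* Placeholder command issuers; return 0 on success, negative on error. */
    static int send_cmd55(void)      { return 0; }
    static int send_acmd(int opcode) { (void)opcode; return 0; }

    /*
     * APP_CMD pattern from mmc_wait_for_app_cmd(): each attempt first selects
     * application-command mode with CMD55, then issues the ACMD itself,
     * retrying the whole pair up to 'retries' extra times.
     */
    static int wait_for_app_cmd(int opcode, int retries)
    {
        int err = -1;

        for (int i = 0; i <= retries && err; i++) {
            err = send_cmd55();
            if (err)
                continue;       /* card did not enter APP_CMD state, retry */
            err = send_acmd(opcode);
        }
        return err;
    }

    int main(void)
    {
        printf("ACMD41 result: %d\n", wait_for_app_cmd(41, 3));
        return 0;
    }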
H A D | slot-gpio.h | 11 int mmc_gpio_alloc(struct mmc_host *host);
|
H A D | sdio_ops.c | 14 #include <linux/mmc/host.h> 22 int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) mmc_send_io_op_cond() argument 27 BUG_ON(!host); mmc_send_io_op_cond() 34 err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); mmc_send_io_op_cond() 43 if (mmc_host_is_spi(host)) { mmc_send_io_op_cond() 63 *rocr = cmd.resp[mmc_host_is_spi(host) ? 1 : 0]; mmc_send_io_op_cond() 68 static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn, mmc_io_rw_direct_host() argument 74 BUG_ON(!host); mmc_io_rw_direct_host() 89 err = mmc_wait_for_cmd(host, &cmd, 0); mmc_io_rw_direct_host() 93 if (mmc_host_is_spi(host)) { mmc_io_rw_direct_host() 94 /* host driver already reported errors */ mmc_io_rw_direct_host() 105 if (mmc_host_is_spi(host)) mmc_io_rw_direct_host() 118 return mmc_io_rw_direct_host(card->host, write, fn, addr, in, out); mmc_io_rw_direct() 130 unsigned int seg_size = card->host->max_seg_size; mmc_io_rw_extended() 155 /* Code in host drivers/fwk assumes that "blocks" always is >=1 */ mmc_io_rw_extended() 183 mmc_wait_for_req(card->host, &mrq); 193 if (mmc_host_is_spi(card->host)) { 194 /* host driver already reported errors */ 207 int sdio_reset(struct mmc_host *host) sdio_reset() argument 214 ret = mmc_io_rw_direct_host(host, 0, 0, SDIO_CCCR_ABORT, 0, &abort); sdio_reset() 220 ret = mmc_io_rw_direct_host(host, 1, 0, SDIO_CCCR_ABORT, abort, NULL); sdio_reset()
|
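sdio_reset() above does a read-modify-write of the CCCR I/O Abort register: read it, OR in the reset bit, write it back, and fall back to writing the bare reset value if the read itself fails (a wedged card may not answer the read). A minimal illustration, with io_read()/io_write() as placeholder register accessors; the register offset and RES bit value follow the SDIO CCCR layout:

    #include <stdint.h>
    #include <stdio.h>

    #define SDIO_CCCR_ABORT  0x06
    #define CCCR_ABORT_RES   0x08   /* RES bit: reset the I/O portion of the card */

    /* Placeholder register accessors; return 0 on success. */
    static int io_read(unsigned addr, uint8_t *val) { (void)addr; *val = 0x01; return 0; }
    static int io_write(unsigned addr, uint8_t val) { (void)addr; (void)val;   return 0; }

    /*
     * Sketch of sdio_reset(): read CCCR ABORT, OR in the RES bit and write it
     * back; if the read fails (card already stuck), just write the RES bit.
     */
    static int sdio_soft_reset(void)
    {
        uint8_t abort;

        if (io_read(SDIO_CCCR_ABORT, &abort))
            abort = CCCR_ABORT_RES;
        else
            abort |= CCCR_ABORT_RES;

        return io_write(SDIO_CCCR_ABORT, abort);
    }

    int main(void)
    {
        printf("reset: %d\n", sdio_soft_reset());
        return 0;
    }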
H A D | host.h | 2 * linux/drivers/mmc/core/host.h 13 #include <linux/mmc/host.h>
|
H A D | pwrseq_simple.c | 18 #include <linux/mmc/host.h> 40 static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host) mmc_pwrseq_simple_pre_power_on() argument 42 struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq, mmc_pwrseq_simple_pre_power_on() 53 static void mmc_pwrseq_simple_post_power_on(struct mmc_host *host) mmc_pwrseq_simple_post_power_on() argument 55 struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq, mmc_pwrseq_simple_post_power_on() 61 static void mmc_pwrseq_simple_power_off(struct mmc_host *host) mmc_pwrseq_simple_power_off() argument 63 struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq, mmc_pwrseq_simple_power_off() 74 static void mmc_pwrseq_simple_free(struct mmc_host *host) mmc_pwrseq_simple_free() argument 76 struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq, mmc_pwrseq_simple_free() 97 struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host, mmc_pwrseq_simple_alloc() argument
|
H A D | mmc_ops.c | 17 #include <linux/mmc/host.h> 63 BUG_ON(!card->host); __mmc_send_status() 66 if (!mmc_host_is_spi(card->host)) __mmc_send_status() 72 err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); __mmc_send_status() 90 static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) _mmc_select_card() argument 95 BUG_ON(!host); _mmc_select_card() 107 err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); _mmc_select_card() 118 return _mmc_select_card(card->host, card); mmc_select_card() 121 int mmc_deselect_cards(struct mmc_host *host) mmc_deselect_cards() argument 123 return _mmc_select_card(host, NULL); mmc_deselect_cards() 134 int mmc_set_dsr(struct mmc_host *host) mmc_set_dsr() argument 140 cmd.arg = (host->dsr << 16) | 0xffff; mmc_set_dsr() 143 return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); mmc_set_dsr() 146 int mmc_go_idle(struct mmc_host *host) mmc_go_idle() argument 160 if (!mmc_host_is_spi(host)) { mmc_go_idle() 161 mmc_set_chip_select(host, MMC_CS_HIGH); mmc_go_idle() 169 err = mmc_wait_for_cmd(host, &cmd, 0); mmc_go_idle() 173 if (!mmc_host_is_spi(host)) { mmc_go_idle() 174 mmc_set_chip_select(host, MMC_CS_DONTCARE); mmc_go_idle() 178 host->use_spi_crc = 0; mmc_go_idle() 183 int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) mmc_send_op_cond() argument 188 BUG_ON(!host); mmc_send_op_cond() 191 cmd.arg = mmc_host_is_spi(host) ? 0 : ocr; mmc_send_op_cond() 195 err = mmc_wait_for_cmd(host, &cmd, 0); mmc_send_op_cond() 204 if (mmc_host_is_spi(host)) { mmc_send_op_cond() 217 if (rocr && !mmc_host_is_spi(host)) mmc_send_op_cond() 223 int mmc_all_send_cid(struct mmc_host *host, u32 *cid) mmc_all_send_cid() argument 228 BUG_ON(!host); mmc_all_send_cid() 235 err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); mmc_all_send_cid() 250 BUG_ON(!card->host); mmc_set_relative_addr() 256 err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); mmc_set_relative_addr() 264 mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode) mmc_send_cxd_native() argument 269 BUG_ON(!host); mmc_send_cxd_native() 276 err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); mmc_send_cxd_native() 290 mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, mmc_send_cxd_data() argument 329 mmc_wait_for_req(host, &mrq); mmc_send_cxd_data() 344 if (!mmc_host_is_spi(card->host)) mmc_send_csd() 345 return mmc_send_cxd_native(card->host, card->rca << 16, mmc_send_csd() 352 ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16); mmc_send_csd() 364 int mmc_send_cid(struct mmc_host *host, u32 *cid) mmc_send_cid() argument 369 if (!mmc_host_is_spi(host)) { mmc_send_cid() 370 if (!host->card) mmc_send_cid() 372 return mmc_send_cxd_native(host, host->card->rca << 16, mmc_send_cid() 380 ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16); mmc_send_cid() 411 err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd, mmc_get_ext_csd() 422 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp) mmc_spi_read_ocr() argument 431 err = mmc_wait_for_cmd(host, &cmd, 0); mmc_spi_read_ocr() 437 int mmc_spi_set_crc(struct mmc_host *host, int use_crc) mmc_spi_set_crc() argument 446 err = mmc_wait_for_cmd(host, &cmd, 0); mmc_spi_set_crc() 448 host->use_spi_crc = use_crc; mmc_spi_set_crc() 470 struct mmc_host *host = card->host; __mmc_switch() local 478 * If the cmd timeout and the max_busy_timeout of the host are both __mmc_switch() 480 * the host from doing hw busy detection, which is done by converting __mmc_switch() 483 if 
(timeout_ms && host->max_busy_timeout && __mmc_switch() 484 (timeout_ms > host->max_busy_timeout)) __mmc_switch() 496 * A busy_timeout of zero means the host can decide to use __mmc_switch() 507 err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); __mmc_switch() 519 if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) __mmc_switch() 534 if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) __mmc_switch() 536 if (mmc_host_is_spi(host)) __mmc_switch() 540 * We are not allowed to issue a status command and the host __mmc_switch() 552 mmc_hostname(host), __func__); __mmc_switch() 557 if (mmc_host_is_spi(host)) { __mmc_switch() 563 mmc_hostname(host), status); __mmc_switch() 580 int mmc_send_tuning(struct mmc_host *host) mmc_send_tuning() argument 586 struct mmc_ios *ios = &host->ios; mmc_send_tuning() 628 mmc_wait_for_req(host, &mrq); mmc_send_tuning() 650 mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode, mmc_send_bus_test() argument 676 mmc_hostname(host), len); mmc_send_bus_test() 707 mmc_wait_for_req(host, &mrq); mmc_send_bus_test() 743 mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width); mmc_bus_test() 744 err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width); mmc_bus_test() 756 mmc_hostname(card->host)); mmc_send_hpi_cmd() 769 err = mmc_wait_for_cmd(card->host, &cmd, 0); mmc_send_hpi_cmd() 772 "HPI command response %#x\n", mmc_hostname(card->host), mmc_send_hpi_cmd()
|
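__mmc_switch() above only requests an R1B (hardware busy-wait) response when the switch timeout fits inside host->max_busy_timeout; otherwise it uses a plain R1 response and polls the card status with CMD13 until the card leaves the programming state or the timeout expires. A stand-alone sketch of that polling fallback; card_is_busy() stands in for the CMD13 status check, and a retry budget replaces the jiffies-based timeout of the real code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder for "send CMD13 and test the busy/programming bits". */
    static bool card_is_busy(void)
    {
        static int polls;
        return ++polls < 3;         /* pretend the card stays busy twice */
    }

    /*
     * Software fallback sketched from __mmc_switch(): when the host cannot
     * wait in hardware, keep re-reading the card status until it leaves the
     * busy state or the retry budget is exhausted.
     */
    static int poll_until_ready(unsigned int max_polls)
    {
        for (unsigned int i = 0; i < max_polls; i++) {
            if (!card_is_busy())
                return 0;
        }
        fprintf(stderr, "card stuck in programming state\n");
        return -1;
    }

    int main(void)
    {
        return poll_until_ready(1000);
    }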
H A D | debugfs.c | 20 #include <linux/mmc/host.h> 55 struct mmc_host *host = s->private; mmc_ios_show() local 56 struct mmc_ios *ios = &host->ios; mmc_ios_show() 60 if (host->actual_clock) mmc_ios_show() 61 seq_printf(s, "actual clock:\t%u Hz\n", host->actual_clock); mmc_ios_show() 186 struct mmc_host *host = data; mmc_clock_opt_get() local 188 *val = host->ios.clock; mmc_clock_opt_get() 195 struct mmc_host *host = data; mmc_clock_opt_set() local 198 if (val > host->f_max) mmc_clock_opt_set() 201 mmc_claim_host(host); mmc_clock_opt_set() 202 mmc_set_clock(host, (unsigned int) val); mmc_clock_opt_set() 203 mmc_release_host(host); mmc_clock_opt_set() 211 void mmc_add_host_debugfs(struct mmc_host *host) mmc_add_host_debugfs() argument 215 root = debugfs_create_dir(mmc_hostname(host), NULL); mmc_add_host_debugfs() 224 host->debugfs_root = root; mmc_add_host_debugfs() 226 if (!debugfs_create_file("ios", S_IRUSR, root, host, &mmc_ios_fops)) mmc_add_host_debugfs() 229 if (!debugfs_create_file("clock", S_IRUSR | S_IWUSR, root, host, mmc_add_host_debugfs() 235 root, &host->clk_delay)) mmc_add_host_debugfs() 241 host->fail_mmc_request = fail_default_attr; mmc_add_host_debugfs() 244 &host->fail_mmc_request))) mmc_add_host_debugfs() 251 host->debugfs_root = NULL; mmc_add_host_debugfs() 253 dev_err(&host->class_dev, "failed to initialize debugfs\n"); mmc_add_host_debugfs() 256 void mmc_remove_host_debugfs(struct mmc_host *host) mmc_remove_host_debugfs() argument 258 debugfs_remove_recursive(host->debugfs_root); mmc_remove_host_debugfs() 338 struct mmc_host *host = card->host; mmc_add_card_debugfs() local 341 if (!host->debugfs_root) mmc_add_card_debugfs() 344 root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root); mmc_add_card_debugfs()
|
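mmc_clock_opt_set() above rejects any value above host->f_max and performs the clock change only while holding the host claim. A small sketch of that clamp-then-lock pattern, using a pthread mutex as a stand-in for mmc_claim_host()/mmc_release_host():

    #include <pthread.h>
    #include <stdio.h>

    /* Stand-ins for the host claim and clock setter used by the debugfs handler. */
    static pthread_mutex_t host_claim = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int f_max = 52000000;   /* hypothetical host maximum, Hz */

    static void set_clock(unsigned int hz) { printf("clock -> %u Hz\n", hz); }

    /*
     * Clamp-then-lock pattern of mmc_clock_opt_set(): reject anything above
     * the host's f_max, then change the clock while holding the host
     * exclusively so no request is in flight during the change.
     */
    static int clock_opt_set(unsigned long long val)
    {
        if (val > f_max)
            return -1;                      /* -EINVAL in the real handler */

        pthread_mutex_lock(&host_claim);    /* mmc_claim_host()   */
        set_clock((unsigned int)val);
        pthread_mutex_unlock(&host_claim);  /* mmc_release_host() */
        return 0;
    }

    int main(void)
    {
        return clock_opt_set(25000000ULL);
    }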
H A D | sd_ops.h | 16 int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr); 17 int mmc_send_if_cond(struct mmc_host *host, u32 ocr); 18 int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca);
|
H A D | Makefile | 6 mmc_core-y := core.o bus.o host.o \
|
H A D | pwrseq_emmc.c | 18 #include <linux/mmc/host.h> 36 static void mmc_pwrseq_emmc_reset(struct mmc_host *host) mmc_pwrseq_emmc_reset() argument 38 struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq, mmc_pwrseq_emmc_reset() 44 static void mmc_pwrseq_emmc_free(struct mmc_host *host) mmc_pwrseq_emmc_free() argument 46 struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq, mmc_pwrseq_emmc_free() 69 struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host, mmc_pwrseq_emmc_alloc() argument
|
H A D | sdio_ops.h | 15 int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr); 20 int sdio_reset(struct mmc_host *host);
|
/linux-4.1.27/drivers/scsi/arm/ |
H A D | acornscsi.c | 179 static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp, 181 static int acornscsi_reconnect_finish(AS_Host *host); 182 static void acornscsi_dma_cleanup(AS_Host *host); 183 static void acornscsi_abortcmd(AS_Host *host, unsigned char tag); 198 static inline void sbic_arm_write(AS_Host *host, unsigned int reg, unsigned int value) sbic_arm_write() argument 200 writeb(reg, host->base + SBIC_REGIDX); sbic_arm_write() 201 writeb(value, host->base + SBIC_REGVAL); sbic_arm_write() 204 static inline int sbic_arm_read(AS_Host *host, unsigned int reg) sbic_arm_read() argument 207 return readl(host->base + SBIC_REGIDX) & 255; sbic_arm_read() 208 writeb(reg, host->base + SBIC_REGIDX); sbic_arm_read() 209 return readl(host->base + SBIC_REGVAL) & 255; sbic_arm_read() 212 #define sbic_arm_writenext(host, val) writeb((val), (host)->base + SBIC_REGVAL) 213 #define sbic_arm_readnext(host) readb((host)->base + SBIC_REGVAL) 216 #define dmac_read(host,reg) \ 217 readb((host)->base + DMAC_OFFSET + ((reg) << 2)) 219 #define dmac_write(host,reg,value) \ 220 ({ writeb((value), (host)->base + DMAC_OFFSET + ((reg) << 2)); }) 222 #define dmac_clearintr(host) writeb(0, (host)->fast + INT_REG) 224 static inline unsigned int dmac_address(AS_Host *host) dmac_address() argument 226 return dmac_read(host, DMAC_TXADRHI) << 16 | dmac_address() 227 dmac_read(host, DMAC_TXADRMD) << 8 | dmac_address() 228 dmac_read(host, DMAC_TXADRLO); dmac_address() 232 void acornscsi_dumpdma(AS_Host *host, char *where) acornscsi_dumpdma() argument 236 mode = dmac_read(host, DMAC_MODECON); acornscsi_dumpdma() 237 addr = dmac_address(host); acornscsi_dumpdma() 238 len = dmac_read(host, DMAC_TXCNTHI) << 8 | acornscsi_dumpdma() 239 dmac_read(host, DMAC_TXCNTLO); acornscsi_dumpdma() 242 host->host->host_no, where, acornscsi_dumpdma() 244 dmac_read(host, DMAC_MASKREG)); acornscsi_dumpdma() 246 printk("DMA @%06x, ", host->dma.start_addr); acornscsi_dumpdma() 247 printk("BH @%p +%04x, ", host->scsi.SCp.ptr, acornscsi_dumpdma() 248 host->scsi.SCp.this_residual); acornscsi_dumpdma() 249 printk("DT @+%04x ST @+%04x", host->dma.transferred, acornscsi_dumpdma() 250 host->scsi.SCp.scsi_xferred); acornscsi_dumpdma() 256 unsigned long acornscsi_sbic_xfcount(AS_Host *host) acornscsi_sbic_xfcount() argument 260 length = sbic_arm_read(host, SBIC_TRANSCNTH) << 16; acornscsi_sbic_xfcount() 261 length |= sbic_arm_readnext(host) << 8; acornscsi_sbic_xfcount() 262 length |= sbic_arm_readnext(host); acornscsi_sbic_xfcount() 268 acornscsi_sbic_wait(AS_Host *host, int stat_mask, int stat, int timeout, char *msg) acornscsi_sbic_wait() argument 273 asr = sbic_arm_read(host, SBIC_ASR); acornscsi_sbic_wait() 281 printk("scsi%d: timeout while %s\n", host->host->host_no, msg); acornscsi_sbic_wait() 287 int acornscsi_sbic_issuecmd(AS_Host *host, int command) acornscsi_sbic_issuecmd() argument 289 if (acornscsi_sbic_wait(host, ASR_CIP, 0, 1000, "issuing command")) acornscsi_sbic_issuecmd() 292 sbic_arm_write(host, SBIC_CMND, command); acornscsi_sbic_issuecmd() 313 void acornscsi_resetcard(AS_Host *host) acornscsi_resetcard() argument 318 host->card.page_reg = 0x80; acornscsi_resetcard() 319 writeb(host->card.page_reg, host->fast + PAGE_REG); acornscsi_resetcard() 324 host->card.page_reg = 0; acornscsi_resetcard() 325 writeb(host->card.page_reg, host->fast + PAGE_REG); acornscsi_resetcard() 332 if (readb(host->fast + INT_REG) & 8) acornscsi_resetcard() 339 host->host->host_no); acornscsi_resetcard() 341 sbic_arm_read(host, SBIC_ASR); 
acornscsi_resetcard() 342 sbic_arm_read(host, SBIC_SSR); acornscsi_resetcard() 345 sbic_arm_write(host, SBIC_OWNID, OWNID_EAF | host->host->this_id); acornscsi_resetcard() 346 sbic_arm_write(host, SBIC_CMND, CMND_RESET); acornscsi_resetcard() 353 if (readb(host->fast + INT_REG) & 8) acornscsi_resetcard() 360 host->host->host_no); acornscsi_resetcard() 362 sbic_arm_read(host, SBIC_ASR); acornscsi_resetcard() 363 if (sbic_arm_read(host, SBIC_SSR) != 0x01) acornscsi_resetcard() 365 host->host->host_no); acornscsi_resetcard() 367 sbic_arm_write(host, SBIC_CTRL, INIT_SBICDMA | CTRL_IDI); acornscsi_resetcard() 368 sbic_arm_write(host, SBIC_TIMEOUT, TIMEOUT_TIME); acornscsi_resetcard() 369 sbic_arm_write(host, SBIC_SYNCHTRANSFER, SYNCHTRANSFER_2DBA); acornscsi_resetcard() 370 sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP); acornscsi_resetcard() 372 host->card.page_reg = 0x40; acornscsi_resetcard() 373 writeb(host->card.page_reg, host->fast + PAGE_REG); acornscsi_resetcard() 376 dmac_write(host, DMAC_INIT, 0); acornscsi_resetcard() 378 dmac_write(host, DMAC_INIT, INIT_8BIT); acornscsi_resetcard() 379 dmac_write(host, DMAC_CHANNEL, CHANNEL_0); acornscsi_resetcard() 380 dmac_write(host, DMAC_DEVCON0, INIT_DEVCON0); acornscsi_resetcard() 381 dmac_write(host, DMAC_DEVCON1, INIT_DEVCON1); acornscsi_resetcard() 384 host->SCpnt = NULL; acornscsi_resetcard() 385 host->scsi.phase = PHASE_IDLE; acornscsi_resetcard() 386 host->scsi.disconnectable = 0; acornscsi_resetcard() 388 memset(host->busyluns, 0, sizeof(host->busyluns)); acornscsi_resetcard() 391 host->device[i].sync_state = SYNC_NEGOCIATE; acornscsi_resetcard() 392 host->device[i].disconnect_ok = 1; acornscsi_resetcard() 498 acornscsi_dumplogline(AS_Host *host, int target, int line) acornscsi_dumplogline() argument 503 ptr = host->status_ptr[target] - STATUS_BUFFER_TO_PRINT; acornscsi_dumplogline() 510 prev = host->status[target][ptr].when; acornscsi_dumplogline() 512 for (; ptr != host->status_ptr[target]; ptr = (ptr + 1) & (STATUS_BUFFER_SIZE - 1)) { acornscsi_dumplogline() 515 if (!host->status[target][ptr].when) acornscsi_dumplogline() 520 printk("%c%02X", host->status[target][ptr].irq ? 
'-' : ' ', acornscsi_dumplogline() 521 host->status[target][ptr].ph); acornscsi_dumplogline() 525 printk(" %02X", host->status[target][ptr].ssr); acornscsi_dumplogline() 529 time_diff = host->status[target][ptr].when - prev; acornscsi_dumplogline() 530 prev = host->status[target][ptr].when; acornscsi_dumplogline() 545 void acornscsi_dumplog(AS_Host *host, int target) acornscsi_dumplog() argument 548 acornscsi_dumplogline(host, target, 0); acornscsi_dumplog() 549 acornscsi_dumplogline(host, target, 1); acornscsi_dumplog() 550 acornscsi_dumplogline(host, target, 2); acornscsi_dumplog() 560 char acornscsi_target(AS_Host *host) acornscsi_target() argument 562 if (host->SCpnt) acornscsi_target() 563 return '0' + host->SCpnt->device->id; acornscsi_target() 690 * Function: acornscsi_kick(AS_Host *host) 692 * Params : host - host to send command to 697 intr_ret_t acornscsi_kick(AS_Host *host) acornscsi_kick() argument 703 SCpnt = host->origSCpnt; acornscsi_kick() 704 host->origSCpnt = NULL; acornscsi_kick() 708 SCpnt = queue_remove_exclude(&host->queues.issue, host->busyluns); acornscsi_kick() 715 if (host->scsi.disconnectable && host->SCpnt) { acornscsi_kick() 716 queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); acornscsi_kick() 717 host->scsi.disconnectable = 0; acornscsi_kick() 719 DBG(host->SCpnt, printk("scsi%d.%c: moved command to disconnected queue\n", acornscsi_kick() 720 host->host->host_no, acornscsi_target(host))); acornscsi_kick() 722 host->SCpnt = NULL; acornscsi_kick() 729 if (!(sbic_arm_read(host, SBIC_ASR) & (ASR_INT|ASR_BSY|ASR_CIP))) { acornscsi_kick() 730 sbic_arm_write(host, SBIC_DESTID, SCpnt->device->id); acornscsi_kick() 731 sbic_arm_write(host, SBIC_CMND, CMND_SELWITHATN); acornscsi_kick() 735 * claim host busy - all of these must happen atomically wrt acornscsi_kick() 738 host->scsi.phase = PHASE_CONNECTING; acornscsi_kick() 739 host->SCpnt = SCpnt; acornscsi_kick() 740 host->scsi.SCp = SCpnt->SCp; acornscsi_kick() 741 host->dma.xfer_setup = 0; acornscsi_kick() 742 host->dma.xfer_required = 0; acornscsi_kick() 743 host->dma.xfer_done = 0; acornscsi_kick() 747 host->host->host_no, '0' + SCpnt->device->id, acornscsi_kick() 764 (u8)(SCpnt->device->lun & 0x07), host->busyluns); acornscsi_kick() 766 host->stats.removes += 1; acornscsi_kick() 770 host->stats.writes += 1; acornscsi_kick() 773 host->stats.reads += 1; acornscsi_kick() 776 host->stats.miscs += 1; acornscsi_kick() 785 * Function: void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp, unsigned int result) 787 * Params : host - interface that completed 790 static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp, acornscsi_done() argument 796 sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP); acornscsi_done() 798 host->stats.fins += 1; acornscsi_done() 803 acornscsi_dma_cleanup(host); acornscsi_done() 805 SCpnt->result = result << 16 | host->scsi.SCp.Message << 8 | host->scsi.SCp.Status; acornscsi_done() 819 if (host->scsi.SCp.ptr && acornscsi_done() 823 if (host->scsi.SCp.scsi_xferred < SCpnt->underflow || acornscsi_done() 824 host->scsi.SCp.scsi_xferred != host->dma.transferred) acornscsi_done() 840 if (host->dma.xfer_done) acornscsi_done() 857 acornscsi_dumpdma(host, "done"); acornscsi_done() 858 acornscsi_dumplog(host, SCpnt->device->id); acornscsi_done() 865 panic("scsi%d.H: null scsi_done function in acornscsi_done", host->host->host_no); acornscsi_done() 868 (u8)(SCpnt->device->lun & 0x7), host->busyluns); acornscsi_done() 872 printk("scsi%d: null command in 
acornscsi_done", host->host->host_no); acornscsi_done() 874 host->scsi.phase = PHASE_IDLE; acornscsi_done() 885 void acornscsi_data_updateptr(AS_Host *host, struct scsi_pointer *SCp, unsigned int length) acornscsi_data_updateptr() argument 891 host->dma.xfer_done = 1; acornscsi_data_updateptr() 895 * Prototype: void acornscsi_data_read(AS_Host *host, char *ptr, 898 * Params : host - host to transfer from 900 * start_addr - host mem address 905 void acornscsi_data_read(AS_Host *host, char *ptr, acornscsi_data_read() argument 914 writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG); acornscsi_data_read() 924 __acornscsi_in(host->base + (offset << 1), ptr, this_len); acornscsi_data_read() 933 writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG); acornscsi_data_read() 936 writeb(host->card.page_reg, host->fast + PAGE_REG); acornscsi_data_read() 940 * Prototype: void acornscsi_data_write(AS_Host *host, char *ptr, 943 * Params : host - host to transfer from 945 * start_addr - host mem address 950 void acornscsi_data_write(AS_Host *host, char *ptr, acornscsi_data_write() argument 959 writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG); acornscsi_data_write() 969 __acornscsi_out(host->base + (offset << 1), ptr, this_len); acornscsi_data_write() 978 writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG); acornscsi_data_write() 981 writeb(host->card.page_reg, host->fast + PAGE_REG); acornscsi_data_write() 989 * Prototype: void acornscsi_dmastop(AS_Host *host) 991 * Params : host - host on which to stop DMA 996 void acornscsi_dma_stop(AS_Host *host) acornscsi_dma_stop() argument 998 dmac_write(host, DMAC_MASKREG, MASK_ON); acornscsi_dma_stop() 999 dmac_clearintr(host); acornscsi_dma_stop() 1002 DBG(host->SCpnt, acornscsi_dumpdma(host, "stop")); acornscsi_dma_stop() 1007 * Function: void acornscsi_dma_setup(AS_Host *host, dmadir_t direction) 1009 * Params : host - host to setup 1015 void acornscsi_dma_setup(AS_Host *host, dmadir_t direction) acornscsi_dma_setup() argument 1019 host->dma.direction = direction; acornscsi_dma_setup() 1021 dmac_write(host, DMAC_MASKREG, MASK_ON); acornscsi_dma_setup() 1025 if (NO_WRITE & (1 << host->SCpnt->device->id)) { acornscsi_dma_setup() 1027 host->host->host_no, acornscsi_target(host)); acornscsi_dma_setup() 1038 length = min_t(unsigned int, host->scsi.SCp.this_residual, DMAC_BUFFER_SIZE / 2); acornscsi_dma_setup() 1040 host->dma.start_addr = address = host->dma.free_addr; acornscsi_dma_setup() 1041 host->dma.free_addr = (host->dma.free_addr + length) & acornscsi_dma_setup() 1048 acornscsi_data_write(host, host->scsi.SCp.ptr, host->dma.start_addr, acornscsi_dma_setup() 1052 dmac_write(host, DMAC_TXCNTLO, length); acornscsi_dma_setup() 1053 dmac_write(host, DMAC_TXCNTHI, length >> 8); acornscsi_dma_setup() 1054 dmac_write(host, DMAC_TXADRLO, address); acornscsi_dma_setup() 1055 dmac_write(host, DMAC_TXADRMD, address >> 8); acornscsi_dma_setup() 1056 dmac_write(host, DMAC_TXADRHI, 0); acornscsi_dma_setup() 1057 dmac_write(host, DMAC_MODECON, mode); acornscsi_dma_setup() 1058 dmac_write(host, DMAC_MASKREG, MASK_OFF); acornscsi_dma_setup() 1061 DBG(host->SCpnt, acornscsi_dumpdma(host, "strt")); acornscsi_dma_setup() 1063 host->dma.xfer_setup = 1; acornscsi_dma_setup() 1068 * Function: void acornscsi_dma_cleanup(AS_Host *host) 1069 * Purpose : ensure that all DMA transfers are up-to-date & host->scsi.SCp is correct 1070 * Params : host - host to finish 1076 void acornscsi_dma_cleanup(AS_Host *host) 
acornscsi_dma_cleanup() argument 1078 dmac_write(host, DMAC_MASKREG, MASK_ON); acornscsi_dma_cleanup() 1079 dmac_clearintr(host); acornscsi_dma_cleanup() 1084 if (host->dma.xfer_required) { acornscsi_dma_cleanup() 1085 host->dma.xfer_required = 0; acornscsi_dma_cleanup() 1086 if (host->dma.direction == DMA_IN) acornscsi_dma_cleanup() 1087 acornscsi_data_read(host, host->dma.xfer_ptr, acornscsi_dma_cleanup() 1088 host->dma.xfer_start, host->dma.xfer_length); acornscsi_dma_cleanup() 1094 if (host->dma.xfer_setup) { acornscsi_dma_cleanup() 1097 host->dma.xfer_setup = 0; acornscsi_dma_cleanup() 1100 DBG(host->SCpnt, acornscsi_dumpdma(host, "cupi")); acornscsi_dma_cleanup() 1106 transferred = dmac_address(host) - host->dma.start_addr; acornscsi_dma_cleanup() 1107 host->dma.transferred += transferred; acornscsi_dma_cleanup() 1109 if (host->dma.direction == DMA_IN) acornscsi_dma_cleanup() 1110 acornscsi_data_read(host, host->scsi.SCp.ptr, acornscsi_dma_cleanup() 1111 host->dma.start_addr, transferred); acornscsi_dma_cleanup() 1116 acornscsi_data_updateptr(host, &host->scsi.SCp, transferred); acornscsi_dma_cleanup() 1118 DBG(host->SCpnt, acornscsi_dumpdma(host, "cupo")); acornscsi_dma_cleanup() 1124 * Function: void acornscsi_dmacintr(AS_Host *host) 1126 * Params : host - host to process 1134 void acornscsi_dma_intr(AS_Host *host) acornscsi_dma_intr() argument 1139 DBG(host->SCpnt, acornscsi_dumpdma(host, "inti")); acornscsi_dma_intr() 1142 dmac_write(host, DMAC_MASKREG, MASK_ON); acornscsi_dma_intr() 1143 dmac_clearintr(host); acornscsi_dma_intr() 1148 transferred = dmac_address(host) - host->dma.start_addr; acornscsi_dma_intr() 1149 host->dma.transferred += transferred; acornscsi_dma_intr() 1154 if (host->dma.direction == DMA_IN) { acornscsi_dma_intr() 1155 host->dma.xfer_start = host->dma.start_addr; acornscsi_dma_intr() 1156 host->dma.xfer_length = transferred; acornscsi_dma_intr() 1157 host->dma.xfer_ptr = host->scsi.SCp.ptr; acornscsi_dma_intr() 1158 host->dma.xfer_required = 1; acornscsi_dma_intr() 1161 acornscsi_data_updateptr(host, &host->scsi.SCp, transferred); acornscsi_dma_intr() 1166 length = min_t(unsigned int, host->scsi.SCp.this_residual, DMAC_BUFFER_SIZE / 2); acornscsi_dma_intr() 1168 host->dma.start_addr = address = host->dma.free_addr; acornscsi_dma_intr() 1169 host->dma.free_addr = (host->dma.free_addr + length) & acornscsi_dma_intr() 1175 if (host->dma.direction == DMA_OUT) acornscsi_dma_intr() 1176 acornscsi_data_write(host, host->scsi.SCp.ptr, host->dma.start_addr, acornscsi_dma_intr() 1180 dmac_write(host, DMAC_TXCNTLO, length); acornscsi_dma_intr() 1181 dmac_write(host, DMAC_TXCNTHI, length >> 8); acornscsi_dma_intr() 1182 dmac_write(host, DMAC_TXADRLO, address); acornscsi_dma_intr() 1183 dmac_write(host, DMAC_TXADRMD, address >> 8); acornscsi_dma_intr() 1184 dmac_write(host, DMAC_TXADRHI, 0); acornscsi_dma_intr() 1185 dmac_write(host, DMAC_MASKREG, MASK_OFF); acornscsi_dma_intr() 1188 DBG(host->SCpnt, acornscsi_dumpdma(host, "into")); acornscsi_dma_intr() 1191 host->dma.xfer_setup = 0; acornscsi_dma_intr() 1199 if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) { acornscsi_dma_intr() 1200 acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_dma_intr() 1202 dmac_write(host, DMAC_TXCNTLO, 0); acornscsi_dma_intr() 1203 dmac_write(host, DMAC_TXCNTHI, 0); acornscsi_dma_intr() 1204 dmac_write(host, DMAC_TXADRLO, 0); acornscsi_dma_intr() 1205 dmac_write(host, DMAC_TXADRMD, 0); acornscsi_dma_intr() 1206 dmac_write(host, DMAC_TXADRHI, 0); acornscsi_dma_intr() 1207 dmac_write(host, 
DMAC_MASKREG, MASK_OFF); acornscsi_dma_intr() 1214 * Function: void acornscsi_dma_xfer(AS_Host *host) 1216 * Params : host - host to process 1219 void acornscsi_dma_xfer(AS_Host *host) acornscsi_dma_xfer() argument 1221 host->dma.xfer_required = 0; acornscsi_dma_xfer() 1223 if (host->dma.direction == DMA_IN) acornscsi_dma_xfer() 1224 acornscsi_data_read(host, host->dma.xfer_ptr, acornscsi_dma_xfer() 1225 host->dma.xfer_start, host->dma.xfer_length); acornscsi_dma_xfer() 1229 * Function: void acornscsi_dma_adjust(AS_Host *host) 1232 * Params : host - host to adjust DMA count for 1235 void acornscsi_dma_adjust(AS_Host *host) acornscsi_dma_adjust() argument 1237 if (host->dma.xfer_setup) { acornscsi_dma_adjust() 1240 DBG(host->SCpnt, acornscsi_dumpdma(host, "adji")); acornscsi_dma_adjust() 1245 * host->scsi.SCp.scsi_xferred is the number of bytes acornscsi_dma_adjust() 1247 * host->dma.transferred is the number of bytes transferred acornscsi_dma_adjust() 1248 * over DMA since host->dma.start_addr was last set. acornscsi_dma_adjust() 1250 * real_dma_addr = host->dma.start_addr + host->scsi.SCp.scsi_xferred acornscsi_dma_adjust() 1251 * - host->dma.transferred acornscsi_dma_adjust() 1253 transferred = host->scsi.SCp.scsi_xferred - host->dma.transferred; acornscsi_dma_adjust() 1256 host->host->host_no, acornscsi_target(host), transferred); acornscsi_dma_adjust() 1258 host->dma.xfer_setup = 0; acornscsi_dma_adjust() 1260 transferred += host->dma.start_addr; acornscsi_dma_adjust() 1261 dmac_write(host, DMAC_TXADRLO, transferred); acornscsi_dma_adjust() 1262 dmac_write(host, DMAC_TXADRMD, transferred >> 8); acornscsi_dma_adjust() 1263 dmac_write(host, DMAC_TXADRHI, transferred >> 16); acornscsi_dma_adjust() 1265 DBG(host->SCpnt, acornscsi_dumpdma(host, "adjo")); acornscsi_dma_adjust() 1276 acornscsi_write_pio(AS_Host *host, char *bytes, int *ptr, int len, unsigned int max_timeout) acornscsi_write_pio() argument 1282 asr = sbic_arm_read(host, SBIC_ASR); acornscsi_write_pio() 1287 sbic_arm_write(host, SBIC_DATA, bytes[my_ptr++]); acornscsi_write_pio() 1301 * Function: void acornscsi_sendcommand(AS_Host *host) 1303 * Params : host - host which is connected to target 1306 acornscsi_sendcommand(AS_Host *host) acornscsi_sendcommand() argument 1308 struct scsi_cmnd *SCpnt = host->SCpnt; acornscsi_sendcommand() 1310 sbic_arm_write(host, SBIC_TRANSCNTH, 0); acornscsi_sendcommand() 1311 sbic_arm_writenext(host, 0); acornscsi_sendcommand() 1312 sbic_arm_writenext(host, SCpnt->cmd_len - host->scsi.SCp.sent_command); acornscsi_sendcommand() 1314 acornscsi_sbic_issuecmd(host, CMND_XFERINFO); acornscsi_sendcommand() 1316 if (acornscsi_write_pio(host, SCpnt->cmnd, acornscsi_sendcommand() 1317 (int *)&host->scsi.SCp.sent_command, SCpnt->cmd_len, 1000000)) acornscsi_sendcommand() 1318 printk("scsi%d: timeout while sending command\n", host->host->host_no); acornscsi_sendcommand() 1320 host->scsi.phase = PHASE_COMMAND; acornscsi_sendcommand() 1324 void acornscsi_sendmessage(AS_Host *host) acornscsi_sendmessage() argument 1326 unsigned int message_length = msgqueue_msglength(&host->scsi.msgs); acornscsi_sendmessage() 1332 host->host->host_no, acornscsi_target(host)); acornscsi_sendmessage() 1337 acornscsi_sbic_issuecmd(host, CMND_XFERINFO | CMND_SBT); acornscsi_sendmessage() 1339 acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "sending message 1"); acornscsi_sendmessage() 1341 sbic_arm_write(host, SBIC_DATA, NOP); acornscsi_sendmessage() 1343 host->scsi.last_message = NOP; acornscsi_sendmessage() 1350 
acornscsi_sbic_issuecmd(host, CMND_XFERINFO | CMND_SBT); acornscsi_sendmessage() 1351 msg = msgqueue_getmsg(&host->scsi.msgs, 0); acornscsi_sendmessage() 1353 acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "sending message 2"); acornscsi_sendmessage() 1355 sbic_arm_write(host, SBIC_DATA, msg->msg[0]); acornscsi_sendmessage() 1357 host->scsi.last_message = msg->msg[0]; acornscsi_sendmessage() 1372 sbic_arm_write(host, SBIC_TRANSCNTH, 0); acornscsi_sendmessage() 1373 sbic_arm_writenext(host, 0); acornscsi_sendmessage() 1374 sbic_arm_writenext(host, message_length); acornscsi_sendmessage() 1375 acornscsi_sbic_issuecmd(host, CMND_XFERINFO); acornscsi_sendmessage() 1378 while ((msg = msgqueue_getmsg(&host->scsi.msgs, msgnr++)) != NULL) { acornscsi_sendmessage() 1384 if (acornscsi_write_pio(host, msg->msg, &i, msg->length, 1000000)) acornscsi_sendmessage() 1385 printk("scsi%d: timeout while sending message\n", host->host->host_no); acornscsi_sendmessage() 1387 host->scsi.last_message = msg->msg[0]; acornscsi_sendmessage() 1389 host->scsi.last_message |= msg->msg[2] << 8; acornscsi_sendmessage() 1402 * Function: void acornscsi_readstatusbyte(AS_Host *host) 1404 * Params : host - host connected to target 1407 void acornscsi_readstatusbyte(AS_Host *host) acornscsi_readstatusbyte() argument 1409 acornscsi_sbic_issuecmd(host, CMND_XFERINFO|CMND_SBT); acornscsi_readstatusbyte() 1410 acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "reading status byte"); acornscsi_readstatusbyte() 1411 host->scsi.SCp.Status = sbic_arm_read(host, SBIC_DATA); acornscsi_readstatusbyte() 1415 * Function: unsigned char acornscsi_readmessagebyte(AS_Host *host) 1417 * Params : host - host connected to target 1420 unsigned char acornscsi_readmessagebyte(AS_Host *host) acornscsi_readmessagebyte() argument 1424 acornscsi_sbic_issuecmd(host, CMND_XFERINFO | CMND_SBT); acornscsi_readmessagebyte() 1426 acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "for message byte"); acornscsi_readmessagebyte() 1428 message = sbic_arm_read(host, SBIC_DATA); acornscsi_readmessagebyte() 1431 acornscsi_sbic_wait(host, ASR_INT, ASR_INT, 1000, "for interrupt after message byte"); acornscsi_readmessagebyte() 1433 sbic_arm_read(host, SBIC_SSR); acornscsi_readmessagebyte() 1439 * Function: void acornscsi_message(AS_Host *host) 1441 * Params : host - host connected to target 1444 void acornscsi_message(AS_Host *host) acornscsi_message() argument 1450 message[msgidx] = acornscsi_readmessagebyte(host); acornscsi_message() 1466 acornscsi_sbic_issuecmd(host, CMND_NEGATEACK); acornscsi_message() 1469 acornscsi_sbic_wait(host, ASR_INT, ASR_INT, 1000, "for interrupt after negate ack"); acornscsi_message() 1470 sbic_arm_read(host, SBIC_SSR); acornscsi_message() 1476 host->host->host_no, acornscsi_target(host)); acornscsi_message() 1481 if (host->scsi.phase == PHASE_RECONNECTED) { acornscsi_message() 1489 host->scsi.reconnected.tag = message[1]; acornscsi_message() 1490 if (acornscsi_reconnect_finish(host)) acornscsi_message() 1491 host->scsi.phase = PHASE_MSGIN; acornscsi_message() 1498 if (host->scsi.phase != PHASE_STATUSIN) { acornscsi_message() 1500 host->host->host_no, acornscsi_target(host)); acornscsi_message() 1501 acornscsi_dumplog(host, host->SCpnt->device->id); acornscsi_message() 1503 host->scsi.phase = PHASE_DONE; acornscsi_message() 1504 host->scsi.SCp.Message = message[0]; acornscsi_message() 1514 acornscsi_dma_cleanup(host); acornscsi_message() 1515 host->SCpnt->SCp = host->scsi.SCp; acornscsi_message() 1516 host->SCpnt->SCp.sent_command = 
0; acornscsi_message() 1517 host->scsi.phase = PHASE_MSGIN; acornscsi_message() 1530 acornscsi_dma_cleanup(host); acornscsi_message() 1531 host->scsi.SCp = host->SCpnt->SCp; acornscsi_message() 1532 host->scsi.phase = PHASE_MSGIN; acornscsi_message() 1545 acornscsi_dma_cleanup(host); acornscsi_message() 1546 host->scsi.phase = PHASE_DISCONNECT; acornscsi_message() 1557 if (host->device[host->SCpnt->device->id].sync_state == SYNC_SENT_REQUEST) acornscsi_message() 1558 host->device[host->SCpnt->device->id].sync_state = SYNC_NEGOCIATE; acornscsi_message() 1564 if (msgqueue_msglength(&host->scsi.msgs)) acornscsi_message() 1565 acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); acornscsi_message() 1567 switch (host->scsi.last_message) { acornscsi_message() 1579 host->host->host_no, acornscsi_target(host)); acornscsi_message() 1580 host->SCpnt->device->simple_tags = 0; acornscsi_message() 1581 set_bit(host->SCpnt->device->id * 8 + acornscsi_message() 1582 (u8)(host->SCpnt->device->lun & 0x7), host->busyluns); acornscsi_message() 1590 host->host->host_no, acornscsi_target(host)); acornscsi_message() 1591 host->device[host->SCpnt->device->id].sync_xfer = SYNCHTRANSFER_2DBA; acornscsi_message() 1592 host->device[host->SCpnt->device->id].sync_state = SYNC_ASYNCHRONOUS; acornscsi_message() 1593 sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer); acornscsi_message() 1608 host->host->host_no, acornscsi_target(host), acornscsi_message() 1616 if (host->device[host->SCpnt->device->id].sync_state == SYNC_SENT_REQUEST) { acornscsi_message() 1623 host->device[host->SCpnt->device->id].sync_state = SYNC_COMPLETED; acornscsi_message() 1625 host->host->host_no, acornscsi_target(host), acornscsi_message() 1627 host->device[host->SCpnt->device->id].sync_xfer = acornscsi_message() 1635 acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); acornscsi_message() 1638 msgqueue_addmsg(&host->scsi.msgs, 5, EXTENDED_MESSAGE, 3, acornscsi_message() 1640 host->device[host->SCpnt->device->id].sync_xfer = acornscsi_message() 1643 sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer); acornscsi_message() 1656 acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); acornscsi_message() 1657 msgqueue_flush(&host->scsi.msgs); acornscsi_message() 1658 msgqueue_addmsg(&host->scsi.msgs, 1, MESSAGE_REJECT); acornscsi_message() 1665 host->host->host_no, acornscsi_target(host), acornscsi_message() 1667 acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); acornscsi_message() 1668 msgqueue_flush(&host->scsi.msgs); acornscsi_message() 1669 msgqueue_addmsg(&host->scsi.msgs, 1, MESSAGE_REJECT); acornscsi_message() 1670 host->scsi.phase = PHASE_MSGIN; acornscsi_message() 1673 acornscsi_sbic_issuecmd(host, CMND_NEGATEACK); acornscsi_message() 1677 * Function: int acornscsi_buildmessages(AS_Host *host) 1678 * Purpose : build the connection messages for a host 1679 * Params : host - host to add messages to 1682 void acornscsi_buildmessages(AS_Host *host) acornscsi_buildmessages() argument 1687 msgqueue_addmsg(&host->scsi.msgs, 1, BUS_DEVICE_RESET); acornscsi_buildmessages() 1692 msgqueue_addmsg(&host->scsi.msgs, 1, acornscsi_buildmessages() 1693 IDENTIFY(host->device[host->SCpnt->device->id].disconnect_ok, acornscsi_buildmessages() 1694 host->SCpnt->device->lun)); acornscsi_buildmessages() 1699 acornscsi_abortcmd(host->SCpnt->tag); acornscsi_buildmessages() 1705 if (host->SCpnt->tag) { acornscsi_buildmessages() 1708 if (host->SCpnt->cmnd[0] == REQUEST_SENSE || acornscsi_buildmessages() 1709 
host->SCpnt->cmnd[0] == TEST_UNIT_READY || acornscsi_buildmessages() 1710 host->SCpnt->cmnd[0] == INQUIRY) acornscsi_buildmessages() 1714 msgqueue_addmsg(&host->scsi.msgs, 2, tag_type, host->SCpnt->tag); acornscsi_buildmessages() 1719 if (host->device[host->SCpnt->device->id].sync_state == SYNC_NEGOCIATE) { acornscsi_buildmessages() 1720 host->device[host->SCpnt->device->id].sync_state = SYNC_SENT_REQUEST; acornscsi_buildmessages() 1721 msgqueue_addmsg(&host->scsi.msgs, 5, acornscsi_buildmessages() 1729 * Function: int acornscsi_starttransfer(AS_Host *host) 1731 * Params : host - host to which target is connected 1735 int acornscsi_starttransfer(AS_Host *host) acornscsi_starttransfer() argument 1739 if (!host->scsi.SCp.ptr /*&& host->scsi.SCp.this_residual*/) { acornscsi_starttransfer() 1741 host->host->host_no, acornscsi_target(host)); acornscsi_starttransfer() 1745 residual = scsi_bufflen(host->SCpnt) - host->scsi.SCp.scsi_xferred; acornscsi_starttransfer() 1747 sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer); acornscsi_starttransfer() 1748 sbic_arm_writenext(host, residual >> 16); acornscsi_starttransfer() 1749 sbic_arm_writenext(host, residual >> 8); acornscsi_starttransfer() 1750 sbic_arm_writenext(host, residual); acornscsi_starttransfer() 1751 acornscsi_sbic_issuecmd(host, CMND_XFERINFO); acornscsi_starttransfer() 1759 * Function : acornscsi_reconnect(AS_Host *host) 1761 * Params : host - host specific data 1767 int acornscsi_reconnect(AS_Host *host) acornscsi_reconnect() argument 1771 target = sbic_arm_read(host, SBIC_SOURCEID); acornscsi_reconnect() 1776 host->host->host_no); acornscsi_reconnect() 1780 if (host->SCpnt && !host->scsi.disconnectable) { acornscsi_reconnect() 1783 host->host->host_no, target, host->SCpnt->device->id); acornscsi_reconnect() 1784 host->SCpnt = NULL; acornscsi_reconnect() 1787 lun = sbic_arm_read(host, SBIC_DATA) & 7; acornscsi_reconnect() 1789 host->scsi.reconnected.target = target; acornscsi_reconnect() 1790 host->scsi.reconnected.lun = lun; acornscsi_reconnect() 1791 host->scsi.reconnected.tag = 0; acornscsi_reconnect() 1793 if (host->scsi.disconnectable && host->SCpnt && acornscsi_reconnect() 1794 host->SCpnt->device->id == target && host->SCpnt->device->lun == lun) acornscsi_reconnect() 1797 if (!ok && queue_probetgtlun(&host->queues.disconnected, target, lun)) acornscsi_reconnect() 1800 ADD_STATUS(target, 0x81, host->scsi.phase, 0); acornscsi_reconnect() 1803 host->scsi.phase = PHASE_RECONNECTED; acornscsi_reconnect() 1808 host->host->host_no, '0' + target); acornscsi_reconnect() 1809 acornscsi_dumplog(host, target); acornscsi_reconnect() 1810 acornscsi_abortcmd(host, 0); acornscsi_reconnect() 1811 if (host->SCpnt) { acornscsi_reconnect() 1812 queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); acornscsi_reconnect() 1813 host->SCpnt = NULL; acornscsi_reconnect() 1816 acornscsi_sbic_issuecmd(host, CMND_NEGATEACK); acornscsi_reconnect() 1821 * Function: int acornscsi_reconect_finish(AS_Host *host) 1823 * Params : host - host to complete 1827 int acornscsi_reconnect_finish(AS_Host *host) acornscsi_reconnect_finish() argument 1829 if (host->scsi.disconnectable && host->SCpnt) { acornscsi_reconnect_finish() 1830 host->scsi.disconnectable = 0; acornscsi_reconnect_finish() 1831 if (host->SCpnt->device->id == host->scsi.reconnected.target && acornscsi_reconnect_finish() 1832 host->SCpnt->device->lun == host->scsi.reconnected.lun && acornscsi_reconnect_finish() 1833 host->SCpnt->tag == 
host->scsi.reconnected.tag) { acornscsi_reconnect_finish() 1835 DBG(host->SCpnt, printk("scsi%d.%c: reconnected", acornscsi_reconnect_finish() 1836 host->host->host_no, acornscsi_target(host))); acornscsi_reconnect_finish() 1839 queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); acornscsi_reconnect_finish() 1841 DBG(host->SCpnt, printk("scsi%d.%c: had to move command " acornscsi_reconnect_finish() 1843 host->host->host_no, acornscsi_target(host))); acornscsi_reconnect_finish() 1845 host->SCpnt = NULL; acornscsi_reconnect_finish() 1848 if (!host->SCpnt) { acornscsi_reconnect_finish() 1849 host->SCpnt = queue_remove_tgtluntag(&host->queues.disconnected, acornscsi_reconnect_finish() 1850 host->scsi.reconnected.target, acornscsi_reconnect_finish() 1851 host->scsi.reconnected.lun, acornscsi_reconnect_finish() 1852 host->scsi.reconnected.tag); acornscsi_reconnect_finish() 1854 DBG(host->SCpnt, printk("scsi%d.%c: had to get command", acornscsi_reconnect_finish() 1855 host->host->host_no, acornscsi_target(host))); acornscsi_reconnect_finish() 1859 if (!host->SCpnt) acornscsi_reconnect_finish() 1860 acornscsi_abortcmd(host, host->scsi.reconnected.tag); acornscsi_reconnect_finish() 1865 host->scsi.SCp = host->SCpnt->SCp; acornscsi_reconnect_finish() 1868 host->scsi.SCp.ptr, host->scsi.SCp.this_residual); acornscsi_reconnect_finish() 1875 host->dma.transferred = host->scsi.SCp.scsi_xferred; acornscsi_reconnect_finish() 1877 return host->SCpnt != NULL; acornscsi_reconnect_finish() 1881 * Function: void acornscsi_disconnect_unexpected(AS_Host *host) 1883 * Params : host - host on which disconnect occurred 1886 void acornscsi_disconnect_unexpected(AS_Host *host) acornscsi_disconnect_unexpected() argument 1889 host->host->host_no, acornscsi_target(host)); acornscsi_disconnect_unexpected() 1891 acornscsi_dumplog(host, 8); acornscsi_disconnect_unexpected() 1894 acornscsi_done(host, &host->SCpnt, DID_ERROR); acornscsi_disconnect_unexpected() 1898 * Function: void acornscsi_abortcmd(AS_host *host, unsigned char tag) 1900 * Params : host - host with connected command to abort 1904 void acornscsi_abortcmd(AS_Host *host, unsigned char tag) acornscsi_abortcmd() argument 1906 host->scsi.phase = PHASE_ABORTED; acornscsi_abortcmd() 1907 sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN); acornscsi_abortcmd() 1909 msgqueue_flush(&host->scsi.msgs); acornscsi_abortcmd() 1912 msgqueue_addmsg(&host->scsi.msgs, 2, ABORT_TAG, tag); acornscsi_abortcmd() 1915 msgqueue_addmsg(&host->scsi.msgs, 1, ABORT); acornscsi_abortcmd() 1922 * Function: int acornscsi_sbicintr(AS_Host *host) 1924 * Params : host - host to process 1930 intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq) acornscsi_sbicintr() argument 1934 asr = sbic_arm_read(host, SBIC_ASR); acornscsi_sbicintr() 1938 ssr = sbic_arm_read(host, SBIC_SSR); acornscsi_sbicintr() 1941 print_sbic_status(asr, ssr, host->scsi.phase); acornscsi_sbicintr() 1944 ADD_STATUS(8, ssr, host->scsi.phase, in_irq); acornscsi_sbicintr() 1946 if (host->SCpnt && !host->scsi.disconnectable) acornscsi_sbicintr() 1947 ADD_STATUS(host->SCpnt->device->id, ssr, host->scsi.phase, in_irq); acornscsi_sbicintr() 1952 host->host->host_no); acornscsi_sbicintr() 1954 sbic_arm_write(host, SBIC_OWNID, OWNID_EAF | host->host->this_id); acornscsi_sbicintr() 1955 sbic_arm_write(host, SBIC_CMND, CMND_RESET); acornscsi_sbicintr() 1959 sbic_arm_write(host, SBIC_CTRL, INIT_SBICDMA | CTRL_IDI); acornscsi_sbicintr() 1960 sbic_arm_write(host, SBIC_TIMEOUT, TIMEOUT_TIME); acornscsi_sbicintr() 1961 
sbic_arm_write(host, SBIC_SYNCHTRANSFER, SYNCHTRANSFER_2DBA); acornscsi_sbicintr() 1962 sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP); acornscsi_sbicintr() 1963 msgqueue_flush(&host->scsi.msgs); acornscsi_sbicintr() 1967 acornscsi_disconnect_unexpected(host); acornscsi_sbicintr() 1971 switch (host->scsi.phase) { acornscsi_sbicintr() 1976 host->scsi.phase = PHASE_CONNECTED; acornscsi_sbicintr() 1977 msgqueue_flush(&host->scsi.msgs); acornscsi_sbicintr() 1978 host->dma.transferred = host->scsi.SCp.scsi_xferred; acornscsi_sbicintr() 1980 asr = sbic_arm_read(host, SBIC_ASR); acornscsi_sbicintr() 1983 ssr = sbic_arm_read(host, SBIC_SSR); acornscsi_sbicintr() 1984 ADD_STATUS(8, ssr, host->scsi.phase, 1); acornscsi_sbicintr() 1985 ADD_STATUS(host->SCpnt->device->id, ssr, host->scsi.phase, 1); acornscsi_sbicintr() 1990 acornscsi_done(host, &host->SCpnt, DID_NO_CONNECT); acornscsi_sbicintr() 1995 host->origSCpnt = host->SCpnt; acornscsi_sbicintr() 1996 host->SCpnt = NULL; acornscsi_sbicintr() 1997 msgqueue_flush(&host->scsi.msgs); acornscsi_sbicintr() 1998 acornscsi_reconnect(host); acornscsi_sbicintr() 2003 host->host->host_no, acornscsi_target(host), ssr); acornscsi_sbicintr() 2004 acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); acornscsi_sbicintr() 2005 acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_sbicintr() 2015 acornscsi_sendcommand(host); acornscsi_sbicintr() 2020 acornscsi_readstatusbyte(host); acornscsi_sbicintr() 2021 host->scsi.phase = PHASE_STATUSIN; acornscsi_sbicintr() 2027 host->scsi.phase = PHASE_MSGOUT; acornscsi_sbicintr() 2028 acornscsi_buildmessages(host); acornscsi_sbicintr() 2029 acornscsi_sendmessage(host); acornscsi_sbicintr() 2034 acornscsi_done(host, &host->SCpnt, DID_ERROR); acornscsi_sbicintr() 2039 host->host->host_no, acornscsi_target(host), ssr); acornscsi_sbicintr() 2040 acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); acornscsi_sbicintr() 2041 acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_sbicintr() 2054 acornscsi_sendcommand(host); acornscsi_sbicintr() 2060 acornscsi_readstatusbyte(host); acornscsi_sbicintr() 2061 host->scsi.phase = PHASE_STATUSIN; acornscsi_sbicintr() 2066 acornscsi_sendmessage(host); acornscsi_sbicintr() 2072 acornscsi_message(host); acornscsi_sbicintr() 2077 host->host->host_no, acornscsi_target(host), ssr); acornscsi_sbicintr() 2078 acornscsi_dumplog(host, host->SCpnt ? 
host->SCpnt->device->id : 8); acornscsi_sbicintr() 2086 if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len) acornscsi_sbicintr() 2087 acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_sbicintr() 2088 acornscsi_dma_setup(host, DMA_OUT); acornscsi_sbicintr() 2089 if (!acornscsi_starttransfer(host)) acornscsi_sbicintr() 2090 acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_sbicintr() 2091 host->scsi.phase = PHASE_DATAOUT; acornscsi_sbicintr() 2096 if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len) acornscsi_sbicintr() 2097 acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_sbicintr() 2098 acornscsi_dma_setup(host, DMA_IN); acornscsi_sbicintr() 2099 if (!acornscsi_starttransfer(host)) acornscsi_sbicintr() 2100 acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_sbicintr() 2101 host->scsi.phase = PHASE_DATAIN; acornscsi_sbicintr() 2106 acornscsi_readstatusbyte(host); acornscsi_sbicintr() 2107 host->scsi.phase = PHASE_STATUSIN; acornscsi_sbicintr() 2112 acornscsi_sendmessage(host); acornscsi_sbicintr() 2117 acornscsi_message(host); acornscsi_sbicintr() 2122 host->host->host_no, acornscsi_target(host), ssr); acornscsi_sbicintr() 2123 acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); acornscsi_sbicintr() 2129 host->scsi.disconnectable = 1; acornscsi_sbicintr() 2130 host->scsi.reconnected.tag = 0; acornscsi_sbicintr() 2131 host->scsi.phase = PHASE_IDLE; acornscsi_sbicintr() 2132 host->stats.disconnects += 1; acornscsi_sbicintr() 2135 host->host->host_no, acornscsi_target(host), ssr); acornscsi_sbicintr() 2136 acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); acornscsi_sbicintr() 2142 acornscsi_reconnect(host); acornscsi_sbicintr() 2145 host->host->host_no, acornscsi_target(host), ssr); acornscsi_sbicintr() 2146 acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); acornscsi_sbicintr() 2159 if (ssr != 0x8f && !acornscsi_reconnect_finish(host)) acornscsi_sbicintr() 2161 ADD_STATUS(host->SCpnt->device->id, ssr, host->scsi.phase, in_irq); acornscsi_sbicintr() 2166 acornscsi_dma_setup(host, DMA_OUT); acornscsi_sbicintr() 2167 if (!acornscsi_starttransfer(host)) acornscsi_sbicintr() 2168 acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_sbicintr() 2169 host->scsi.phase = PHASE_DATAOUT; acornscsi_sbicintr() 2175 acornscsi_dma_setup(host, DMA_IN); acornscsi_sbicintr() 2176 if (!acornscsi_starttransfer(host)) acornscsi_sbicintr() 2177 acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_sbicintr() 2178 host->scsi.phase = PHASE_DATAIN; acornscsi_sbicintr() 2183 acornscsi_sendcommand(host);/* -> PHASE_COMMAND, PHASE_COMMANDPAUSED */ acornscsi_sbicintr() 2189 acornscsi_readstatusbyte(host); acornscsi_sbicintr() 2190 host->scsi.phase = PHASE_STATUSIN; acornscsi_sbicintr() 2196 acornscsi_sendmessage(host); acornscsi_sbicintr() 2200 acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */ acornscsi_sbicintr() 2205 host->host->host_no, acornscsi_target(host), ssr); acornscsi_sbicintr() 2206 acornscsi_dumplog(host, host->SCpnt ? 
host->SCpnt->device->id : 8); acornscsi_sbicintr() 2218 acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_sbicintr() 2225 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - acornscsi_sbicintr() 2226 acornscsi_sbic_xfcount(host); acornscsi_sbicintr() 2227 acornscsi_dma_stop(host); acornscsi_sbicintr() 2228 acornscsi_readstatusbyte(host); acornscsi_sbicintr() 2229 host->scsi.phase = PHASE_STATUSIN; acornscsi_sbicintr() 2236 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - acornscsi_sbicintr() 2237 acornscsi_sbic_xfcount(host); acornscsi_sbicintr() 2238 acornscsi_dma_stop(host); acornscsi_sbicintr() 2239 acornscsi_sendmessage(host); acornscsi_sbicintr() 2246 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - acornscsi_sbicintr() 2247 acornscsi_sbic_xfcount(host); acornscsi_sbicintr() 2248 acornscsi_dma_stop(host); acornscsi_sbicintr() 2249 acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */ acornscsi_sbicintr() 2254 host->host->host_no, acornscsi_target(host), ssr); acornscsi_sbicintr() 2255 acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); acornscsi_sbicintr() 2267 acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_sbicintr() 2274 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - acornscsi_sbicintr() 2275 acornscsi_sbic_xfcount(host); acornscsi_sbicintr() 2276 acornscsi_dma_stop(host); acornscsi_sbicintr() 2277 acornscsi_dma_adjust(host); acornscsi_sbicintr() 2278 acornscsi_readstatusbyte(host); acornscsi_sbicintr() 2279 host->scsi.phase = PHASE_STATUSIN; acornscsi_sbicintr() 2286 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - acornscsi_sbicintr() 2287 acornscsi_sbic_xfcount(host); acornscsi_sbicintr() 2288 acornscsi_dma_stop(host); acornscsi_sbicintr() 2289 acornscsi_dma_adjust(host); acornscsi_sbicintr() 2290 acornscsi_sendmessage(host); acornscsi_sbicintr() 2297 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - acornscsi_sbicintr() 2298 acornscsi_sbic_xfcount(host); acornscsi_sbicintr() 2299 acornscsi_dma_stop(host); acornscsi_sbicintr() 2300 acornscsi_dma_adjust(host); acornscsi_sbicintr() 2301 acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */ acornscsi_sbicintr() 2306 host->host->host_no, acornscsi_target(host), ssr); acornscsi_sbicintr() 2307 acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); acornscsi_sbicintr() 2316 acornscsi_message(host); acornscsi_sbicintr() 2322 acornscsi_sendmessage(host); acornscsi_sbicintr() 2327 host->host->host_no, acornscsi_target(host), ssr); acornscsi_sbicintr() 2328 acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); acornscsi_sbicintr() 2338 acornscsi_sendmessage(host); acornscsi_sbicintr() 2345 acornscsi_message(host); acornscsi_sbicintr() 2350 host->host->host_no, acornscsi_target(host)); acornscsi_sbicintr() 2351 acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); acornscsi_sbicintr() 2352 acornscsi_done(host, &host->SCpnt, DID_ERROR); acornscsi_sbicintr() 2357 host->host->host_no, acornscsi_target(host), ssr); acornscsi_sbicintr() 2358 acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); acornscsi_sbicintr() 2365 acornscsi_done(host, &host->SCpnt, DID_OK); acornscsi_sbicintr() 2370 acornscsi_sendmessage(host); acornscsi_sbicintr() 2375 host->host->host_no, acornscsi_target(host), ssr); acornscsi_sbicintr() 2376 acornscsi_dumplog(host, host->SCpnt ? 
host->SCpnt->device->id : 8); acornscsi_sbicintr() 2383 if (host->SCpnt) acornscsi_sbicintr() 2384 acornscsi_done(host, &host->SCpnt, DID_ABORT); acornscsi_sbicintr() 2386 clear_bit(host->scsi.reconnected.target * 8 + host->scsi.reconnected.lun, acornscsi_sbicintr() 2387 host->busyluns); acornscsi_sbicintr() 2388 host->scsi.phase = PHASE_IDLE; acornscsi_sbicintr() 2396 acornscsi_sendmessage(host); acornscsi_sbicintr() 2401 host->host->host_no, acornscsi_target(host), ssr); acornscsi_sbicintr() 2402 acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); acornscsi_sbicintr() 2408 host->host->host_no, acornscsi_target(host), ssr); acornscsi_sbicintr() 2409 acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); acornscsi_sbicintr() 2423 AS_Host *host = (AS_Host *)dev_id; acornscsi_intr() local 2431 iostatus = readb(host->fast + INT_REG); acornscsi_intr() 2434 acornscsi_dma_intr(host); acornscsi_intr() 2435 iostatus = readb(host->fast + INT_REG); acornscsi_intr() 2439 ret = acornscsi_sbicintr(host, in_irq); acornscsi_intr() 2446 if (host->dma.xfer_required) acornscsi_intr() 2447 acornscsi_dma_xfer(host); acornscsi_intr() 2450 ret = acornscsi_kick(host); acornscsi_intr() 2472 AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; acornscsi_queuecmd_lck() local 2477 host->host->host_no, SCpnt); acornscsi_queuecmd_lck() 2484 host->host->host_no, '0' + SCpnt->device->id); acornscsi_queuecmd_lck() 2501 host->stats.queues += 1; acornscsi_queuecmd_lck() 2506 if (!queue_add_cmd_ordered(&host->queues.issue, SCpnt)) { acornscsi_queuecmd_lck() 2512 if (host->scsi.phase == PHASE_IDLE) acornscsi_queuecmd_lck() 2513 acornscsi_kick(host); acornscsi_queuecmd_lck() 2550 * Purpose : abort a command on this host 2554 static enum res_abort acornscsi_do_abort(AS_Host *host, struct scsi_cmnd *SCpnt) acornscsi_do_abort() argument 2558 if (queue_remove_cmd(&host->queues.issue, SCpnt)) { acornscsi_do_abort() 2569 } else if (queue_remove_cmd(&host->queues.disconnected, SCpnt)) { acornscsi_do_abort() 2581 } else if (host->SCpnt == SCpnt) { acornscsi_do_abort() 2589 switch (host->scsi.phase) { acornscsi_do_abort() 2599 if (host->scsi.disconnectable) { acornscsi_do_abort() 2600 host->scsi.disconnectable = 0; acornscsi_do_abort() 2601 host->SCpnt = NULL; acornscsi_do_abort() 2612 sbic_arm_write(host, SBIC_CMND, CMND_DISCONNECT); acornscsi_do_abort() 2613 host->SCpnt = NULL; acornscsi_do_abort() 2618 acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_do_abort() 2622 } else if (host->origSCpnt == SCpnt) { acornscsi_do_abort() 2629 host->origSCpnt = NULL; acornscsi_do_abort() 2642 * Purpose : abort a command on this host 2648 AS_Host *host = (AS_Host *) SCpnt->device->host->hostdata; acornscsi_abort() local 2651 host->stats.aborts += 1; acornscsi_abort() 2656 asr = sbic_arm_read(host, SBIC_ASR); acornscsi_abort() 2657 ssr = sbic_arm_read(host, SBIC_SSR); acornscsi_abort() 2660 print_sbic_status(asr, ssr, host->scsi.phase); acornscsi_abort() 2661 acornscsi_dumplog(host, SCpnt->device->id); acornscsi_abort() 2665 printk("scsi%d: ", host->host->host_no); acornscsi_abort() 2667 switch (acornscsi_do_abort(host, SCpnt)) { acornscsi_abort() 2678 (u8)(SCpnt->device->lun & 0x7), host->busyluns); acornscsi_abort() 2710 acornscsi_dumplog(host, SCpnt->device->id); acornscsi_abort() 2723 * Purpose : reset a command on this host/reset this host 2729 AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; acornscsi_bus_reset() local 2732 host->stats.resets += 1; acornscsi_bus_reset() 2738 asr = 
sbic_arm_read(host, SBIC_ASR); acornscsi_bus_reset() 2739 ssr = sbic_arm_read(host, SBIC_SSR); acornscsi_bus_reset() 2742 print_sbic_status(asr, ssr, host->scsi.phase); acornscsi_bus_reset() 2743 acornscsi_dumplog(host, SCpnt->device->id); acornscsi_bus_reset() 2747 acornscsi_dma_stop(host); acornscsi_bus_reset() 2750 * do hard reset. This resets all devices on this host, and so we acornscsi_bus_reset() 2753 acornscsi_resetcard(host); acornscsi_bus_reset() 2755 while ((SCptr = queue_remove(&host->queues.disconnected)) != NULL) acornscsi_bus_reset() 2766 * Function: char *acornscsi_info(struct Scsi_Host *host) 2768 * Params : host - host to give information on 2772 char *acornscsi_info(struct Scsi_Host *host) acornscsi_info() argument 2788 , host->hostt->name, host->io_port, host->irq, acornscsi_info() 2797 AS_Host *host; acornscsi_show_info() local 2799 host = (AS_Host *)instance->hostdata; acornscsi_show_info() 2814 host->base + SBIC_REGIDX, host->scsi.irq); acornscsi_show_info() 2817 host->base + DMAC_OFFSET, host->scsi.irq); acornscsi_show_info() 2826 host->stats.queues, host->stats.removes, acornscsi_show_info() 2827 host->stats.fins, host->stats.reads, acornscsi_show_info() 2828 host->stats.writes, host->stats.miscs, acornscsi_show_info() 2829 host->stats.disconnects, host->stats.aborts, acornscsi_show_info() 2830 host->stats.resets); acornscsi_show_info() 2836 statptr = host->status_ptr[devidx] - 10; acornscsi_show_info() 2841 prev = host->status[devidx][statptr].when; acornscsi_show_info() 2843 for (; statptr != host->status_ptr[devidx]; statptr = (statptr + 1) & (STATUS_BUFFER_SIZE - 1)) { acornscsi_show_info() 2844 if (host->status[devidx][statptr].when) { acornscsi_show_info() 2846 host->status[devidx][statptr].irq ? '-' : ' ', acornscsi_show_info() 2847 host->status[devidx][statptr].ph, acornscsi_show_info() 2848 host->status[devidx][statptr].ssr, acornscsi_show_info() 2849 (host->status[devidx][statptr].when - prev) < 100 ? acornscsi_show_info() 2850 (host->status[devidx][statptr].when - prev) : 99); acornscsi_show_info() 2851 prev = host->status[devidx][statptr].when; acornscsi_show_info() 2868 if (host->device[scd->id].sync_xfer & 15) shost_for_each_device() 2870 host->device[scd->id].sync_xfer & 15, shost_for_each_device() 2871 acornscsi_getperiod(host->device[scd->id].sync_xfer)); shost_for_each_device() 2897 struct Scsi_Host *host; acornscsi_probe() local 2905 host = scsi_host_alloc(&acornscsi_template, sizeof(AS_Host)); acornscsi_probe() 2906 if (!host) { acornscsi_probe() 2911 ashost = (AS_Host *)host->hostdata; acornscsi_probe() 2918 host->irq = ec->irq; acornscsi_probe() 2919 ashost->host = host; acornscsi_probe() 2920 ashost->scsi.irq = host->irq; acornscsi_probe() 2925 ret = request_irq(host->irq, acornscsi_intr, 0, "acornscsi", ashost); acornscsi_probe() 2928 host->host_no, ashost->scsi.irq, ret); acornscsi_probe() 2939 ret = scsi_add_host(host, &ec->dev); acornscsi_probe() 2943 scsi_scan_host(host); acornscsi_probe() 2947 free_irq(host->irq, ashost); acornscsi_probe() 2954 scsi_host_put(host); acornscsi_probe() 2963 struct Scsi_Host *host = ecard_get_drvdata(ec); acornscsi_remove() local 2964 AS_Host *ashost = (AS_Host *)host->hostdata; acornscsi_remove() 2967 scsi_remove_host(host); acornscsi_remove() 2974 free_irq(host->irq, ashost); acornscsi_remove() 2981 scsi_host_put(host); acornscsi_remove()
|
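The acornscsi.c fragment above tracks which target/LUN pairs currently own a command in the host->busyluns bitmap, indexed as target * 8 + (lun & 7); see the set_bit() call in acornscsi_message() and the clear_bit() in the PHASE_ABORTED handling of acornscsi_sbicintr(). Below is a minimal sketch of that bookkeeping, assuming 8 targets with 8 LUNs each; the busylun_* helper names are invented for illustration and are not part of the driver.

#include <linux/bitops.h>
#include <linux/types.h>

/*
 * Sketch of the busyluns bookkeeping in acornscsi.c: one bit per target/LUN
 * pair, indexed as target * 8 + (lun & 7). Names and 8x8 sizing are assumed.
 */
#define BUSYLUN_BITS	(8 * 8)

static inline unsigned int busylun_index(unsigned int target, unsigned int lun)
{
	return target * 8 + (lun & 0x7);
}

static inline void busylun_set(unsigned long *map, unsigned int target, unsigned int lun)
{
	set_bit(busylun_index(target, lun), map);	/* command issued to this LUN */
}

static inline void busylun_clear(unsigned long *map, unsigned int target, unsigned int lun)
{
	clear_bit(busylun_index(target, lun), map);	/* command completed or aborted */
}

static inline bool busylun_busy(const unsigned long *map, unsigned int target, unsigned int lun)
{
	return test_bit(busylun_index(target, lun), map);
}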
H A D | cumana_1.c | 22 #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) 48 NCR5380_pwrite(struct Scsi_Host *host, unsigned char *addr, int len) NCR5380_pwrite() argument 51 void __iomem *dma = priv(host)->dma + 0x2000; NCR5380_pwrite() 55 writeb(0x02, priv(host)->base + CTRL); NCR5380_pwrite() 61 status = readb(priv(host)->base + STAT); NCR5380_pwrite() 80 writeb(0x12, priv(host)->base + CTRL); NCR5380_pwrite() 85 status = readb(priv(host)->base + STAT); NCR5380_pwrite() 95 status = readb(priv(host)->base + STAT); NCR5380_pwrite() 106 writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL); NCR5380_pwrite() 111 NCR5380_pread(struct Scsi_Host *host, unsigned char *addr, int len) NCR5380_pread() argument 114 void __iomem *dma = priv(host)->dma + 0x2000; NCR5380_pread() 118 writeb(0x00, priv(host)->base + CTRL); NCR5380_pread() 123 status = readb(priv(host)->base + STAT); NCR5380_pread() 142 writeb(0x10, priv(host)->base + CTRL); NCR5380_pread() 147 status = readb(priv(host)->base + STAT); NCR5380_pread() 157 status = readb(priv(host)->base + STAT); NCR5380_pread() 168 writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL); NCR5380_pread() 172 static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg) cumanascsi_read() argument 174 void __iomem *base = priv(host)->base; cumanascsi_read() 181 priv(host)->ctrl = 0x40; cumanascsi_read() 187 static void cumanascsi_write(struct Scsi_Host *host, unsigned int reg, unsigned int value) cumanascsi_write() argument 189 void __iomem *base = priv(host)->base; cumanascsi_write() 195 priv(host)->ctrl = 0x40; cumanascsi_write() 219 struct Scsi_Host *host; cumanascsi1_probe() local 226 host = scsi_host_alloc(&cumanascsi_template, sizeof(struct NCR5380_hostdata)); cumanascsi1_probe() 227 if (!host) { cumanascsi1_probe() 232 priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW), cumanascsi1_probe() 234 priv(host)->dma = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC), cumanascsi1_probe() 236 if (!priv(host)->base || !priv(host)->dma) { cumanascsi1_probe() 241 host->irq = ec->irq; cumanascsi1_probe() 243 NCR5380_init(host, 0); cumanascsi1_probe() 245 priv(host)->ctrl = 0; cumanascsi1_probe() 246 writeb(0, priv(host)->base + CTRL); cumanascsi1_probe() 248 host->n_io_port = 255; cumanascsi1_probe() 249 if (!(request_region(host->io_port, host->n_io_port, "CumanaSCSI-1"))) { cumanascsi1_probe() 254 ret = request_irq(host->irq, cumanascsi_intr, 0, cumanascsi1_probe() 255 "CumanaSCSI-1", host); cumanascsi1_probe() 258 host->host_no, host->irq, ret); cumanascsi1_probe() 262 ret = scsi_add_host(host, &ec->dev); cumanascsi1_probe() 266 scsi_scan_host(host); cumanascsi1_probe() 270 free_irq(host->irq, host); cumanascsi1_probe() 272 iounmap(priv(host)->base); cumanascsi1_probe() 273 iounmap(priv(host)->dma); cumanascsi1_probe() 274 scsi_host_put(host); cumanascsi1_probe() 283 struct Scsi_Host *host = ecard_get_drvdata(ec); cumanascsi1_remove() local 287 scsi_remove_host(host); cumanascsi1_remove() 288 free_irq(host->irq, host); cumanascsi1_remove() 289 NCR5380_exit(host); cumanascsi1_remove() 290 iounmap(priv(host)->base); cumanascsi1_remove() 291 iounmap(priv(host)->dma); cumanascsi1_remove() 292 scsi_host_put(host); cumanascsi1_remove()
|
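In the cumana_1.c fragment above, all per-card state lives in the hostdata area that scsi_host_alloc() reserves directly behind struct Scsi_Host, and the priv() macro is simply a cast of host->hostdata to the driver's private type (struct NCR5380_hostdata here). A minimal sketch of that pattern follows; struct my_card_data, my_template and my_probe_sketch() are placeholder names, not part of the driver.

#include <linux/errno.h>
#include <linux/io.h>
#include <scsi/scsi_host.h>

/* Placeholder private state; the real driver stores struct NCR5380_hostdata. */
struct my_card_data {
	void __iomem *base;
	int ctrl;
};

/* Same idea as cumana_1.c's priv(): hostdata sits right after struct Scsi_Host. */
#define priv(host) ((struct my_card_data *)(host)->hostdata)

static struct scsi_host_template my_template;	/* .name, .queuecommand, ... elided */

static int my_probe_sketch(void)
{
	struct Scsi_Host *host;

	/* Ask the midlayer to allocate the host plus our private area in one go. */
	host = scsi_host_alloc(&my_template, sizeof(struct my_card_data));
	if (!host)
		return -ENOMEM;

	priv(host)->ctrl = 0;		/* private state is usable immediately */

	scsi_host_put(host);		/* sketch only: drop the reference again */
	return 0;
}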
H A D | oak.c | 22 #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) 24 #define NCR5380_setup(host) _base = priv(host)->base 122 struct Scsi_Host *host; oakscsi_probe() local 129 host = scsi_host_alloc(&oakscsi_template, sizeof(struct NCR5380_hostdata)); oakscsi_probe() 130 if (!host) { oakscsi_probe() 135 priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC), oakscsi_probe() 137 if (!priv(host)->base) { oakscsi_probe() 142 host->irq = NO_IRQ; oakscsi_probe() 143 host->n_io_port = 255; oakscsi_probe() 145 NCR5380_init(host, 0); oakscsi_probe() 147 ret = scsi_add_host(host, &ec->dev); oakscsi_probe() 151 scsi_scan_host(host); oakscsi_probe() 155 iounmap(priv(host)->base); oakscsi_probe() 157 scsi_host_put(host); oakscsi_probe() 166 struct Scsi_Host *host = ecard_get_drvdata(ec); oakscsi_remove() local 169 scsi_remove_host(host); oakscsi_remove() 171 NCR5380_exit(host); oakscsi_remove() 172 iounmap(priv(host)->base); oakscsi_remove() 173 scsi_host_put(host); oakscsi_remove()
|
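The oak.c fragment above also shows the teardown order shared by these card drivers: unregister the host from the SCSI core first, then shut down the chip driver and unmap the card, and drop the final host reference last. A condensed sketch of oakscsi_remove() showing that order; priv() is the hostdata cast macro defined at the top of the file, and the expansion-card resource release steps are elided.

/* Condensed sketch of oakscsi_remove(): undo probe in reverse order. */
static void oakscsi_remove_sketch(struct expansion_card *ec)
{
	struct Scsi_Host *host = ecard_get_drvdata(ec);

	scsi_remove_host(host);		/* no further commands after this point */
	NCR5380_exit(host);		/* shut down the 5380 core */
	iounmap(priv(host)->base);	/* unmap the card's memory window */
	scsi_host_put(host);		/* final reference; frees hostdata too */
}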
H A D | powertec.c | 96 /* Prototype: void powertecscsi_terminator_ctl(host, on_off) 98 * Params : host - card to turn on/off 102 powertecscsi_terminator_ctl(struct Scsi_Host *host, int on_off) powertecscsi_terminator_ctl() argument 104 struct powertec_info *info = (struct powertec_info *)host->hostdata; powertecscsi_terminator_ctl() 122 /* Prototype: fasdmatype_t powertecscsi_dma_setup(host, SCpnt, direction, min_type) 124 * Params : host - host 131 powertecscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, powertecscsi_dma_setup() argument 134 struct powertec_info *info = (struct powertec_info *)host->hostdata; powertecscsi_dma_setup() 135 struct device *dev = scsi_get_device(host); powertecscsi_dma_setup() 167 /* Prototype: int powertecscsi_dma_stop(host, SCpnt) 169 * Params : host - host 173 powertecscsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) powertecscsi_dma_stop() argument 175 struct powertec_info *info = (struct powertec_info *)host->hostdata; powertecscsi_dma_stop() 180 /* Prototype: const char *powertecscsi_info(struct Scsi_Host * host) 182 * Params : host - driver host structure to return info for. 185 const char *powertecscsi_info(struct Scsi_Host *host) powertecscsi_info() argument 187 struct powertec_info *info = (struct powertec_info *)host->hostdata; powertecscsi_info() 191 host->hostt->name, info->info.scsi.type, info->ec->slot_no, powertecscsi_info() 197 /* Prototype: int powertecscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) 199 * Params : host - host to setup 205 powertecscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) powertecscsi_set_proc_info() argument 215 powertecscsi_terminator_ctl(host, 1); powertecscsi_set_proc_info() 217 powertecscsi_terminator_ctl(host, 0); powertecscsi_set_proc_info() 240 static int powertecscsi_show_info(struct seq_file *m, struct Scsi_Host *host) powertecscsi_show_info() argument 244 info = (struct powertec_info *)host->hostdata; powertecscsi_show_info() 259 struct Scsi_Host *host = ecard_get_drvdata(ec); powertecscsi_show_term() local 260 struct powertec_info *info = (struct powertec_info *)host->hostdata; powertecscsi_show_term() 269 struct Scsi_Host *host = ecard_get_drvdata(ec); powertecscsi_store_term() local 272 powertecscsi_terminator_ctl(host, buf[0] != '0'); powertecscsi_store_term() 304 struct Scsi_Host *host; powertecscsi_probe() local 319 host = scsi_host_alloc(&powertecscsi_template, powertecscsi_probe() 321 if (!host) { powertecscsi_probe() 326 ecard_set_drvdata(ec, host); powertecscsi_probe() 328 info = (struct powertec_info *)host->hostdata; powertecscsi_probe() 330 powertecscsi_terminator_ctl(host, term[ec->slot_no]); powertecscsi_probe() 356 ret = fas216_init(host); powertecscsi_probe() 364 host->host_no, ec->irq, ret); powertecscsi_probe() 371 host->host_no, info->info.scsi.dma); powertecscsi_probe() 379 ret = fas216_add(host, &ec->dev); powertecscsi_probe() 385 free_irq(ec->irq, host); powertecscsi_probe() 388 fas216_release(host); powertecscsi_probe() 392 scsi_host_put(host); powertecscsi_probe() 403 struct Scsi_Host *host = ecard_get_drvdata(ec); powertecscsi_remove() local 404 struct powertec_info *info = (struct powertec_info *)host->hostdata; powertecscsi_remove() 407 fas216_remove(host); powertecscsi_remove() 415 fas216_release(host); powertecscsi_remove() 416 scsi_host_put(host); powertecscsi_remove()
|
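powertec.c above exposes SCSI bus termination as a writable attribute: the store handler treats a leading '0' as off and anything else as on, and routes the choice through powertecscsi_terminator_ctl(). The sketch below keeps only that decision; it takes the expansion card directly rather than the struct device the real sysfs callback receives, so the signature is simplified and my_store_term_sketch is an invented name.

/* Simplified sketch of the powertec.c termination toggle: a leading '0'
 * disables the on-board terminators, any other first character enables them. */
static ssize_t my_store_term_sketch(struct expansion_card *ec,
				    const char *buf, size_t len)
{
	struct Scsi_Host *host = ecard_get_drvdata(ec);

	if (len)
		powertecscsi_terminator_ctl(host, buf[0] != '0');

	return len;	/* consume the whole write */
}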
H A D | arxescsi.c | 57 * Function: int arxescsi_dma_setup(host, SCpnt, direction, min_type) 59 * Params : host - host 66 arxescsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, arxescsi_dma_setup() argument 104 * Function: int arxescsi_dma_pseudo(host, SCpnt, direction, transfer) 106 * Params : host - host 112 arxescsi_dma_pseudo(struct Scsi_Host *host, struct scsi_pointer *SCp, arxescsi_dma_pseudo() argument 115 struct arxescsi_info *info = (struct arxescsi_info *)host->hostdata; arxescsi_dma_pseudo() 193 * Function: int arxescsi_dma_stop(host, SCpnt) 195 * Params : host - host 198 static void arxescsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) arxescsi_dma_stop() argument 206 * Function: const char *arxescsi_info(struct Scsi_Host * host) 208 * Params : host - driver host structure to return info for. 211 static const char *arxescsi_info(struct Scsi_Host *host) arxescsi_info() argument 213 struct arxescsi_info *info = (struct arxescsi_info *)host->hostdata; arxescsi_info() 217 host->hostt->name, info->info.scsi.type, info->ec->slot_no, arxescsi_info() 224 arxescsi_show_info(struct seq_file *m, struct Scsi_Host *host) arxescsi_show_info() argument 227 info = (struct arxescsi_info *)host->hostdata; arxescsi_show_info() 255 struct Scsi_Host *host; arxescsi_probe() local 270 host = scsi_host_alloc(&arxescsi_template, sizeof(struct arxescsi_info)); arxescsi_probe() 271 if (!host) { arxescsi_probe() 276 info = (struct arxescsi_info *)host->hostdata; arxescsi_probe() 299 ret = fas216_init(host); arxescsi_probe() 303 ret = fas216_add(host, &ec->dev); arxescsi_probe() 307 fas216_release(host); arxescsi_probe() 309 scsi_host_put(host); arxescsi_probe() 318 struct Scsi_Host *host = ecard_get_drvdata(ec); arxescsi_remove() local 321 fas216_remove(host); arxescsi_remove() 323 fas216_release(host); arxescsi_remove() 324 scsi_host_put(host); arxescsi_remove()
|
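arxescsi.c above is the simplest of the fas216-based probes: no interrupt or DMA resources are claimed, so the bring-up reduces to fas216_init() followed by fas216_add(), with fas216_release() as the unwind step if registration fails. A condensed sketch of that ordering, with the expansion-card resource claiming and arxescsi_info setup elided:

/* Condensed sketch of arxescsi_probe(): two-stage FAS216 bring-up with
 * error unwinding. Resource claiming and info setup are elided. */
static int arxescsi_probe_sketch(struct expansion_card *ec)
{
	struct Scsi_Host *host;
	int ret;

	host = scsi_host_alloc(&arxescsi_template, sizeof(struct arxescsi_info));
	if (!host)
		return -ENOMEM;

	ret = fas216_init(host);		/* prepare the FAS216 core */
	if (ret)
		goto out_put;

	ret = fas216_add(host, &ec->dev);	/* register with the SCSI layer */
	if (ret == 0)
		return 0;

	fas216_release(host);			/* undo fas216_init() on failure */
out_put:
	scsi_host_put(host);
	return ret;
}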
H A D | eesox.c | 115 /* Prototype: void eesoxscsi_terminator_ctl(*host, on_off) 117 * Params : host - card to turn on/off 121 eesoxscsi_terminator_ctl(struct Scsi_Host *host, int on_off) eesoxscsi_terminator_ctl() argument 123 struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; eesoxscsi_terminator_ctl() 126 spin_lock_irqsave(host->host_lock, flags); eesoxscsi_terminator_ctl() 133 spin_unlock_irqrestore(host->host_lock, flags); eesoxscsi_terminator_ctl() 149 /* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type) 151 * Params : host - host 158 eesoxscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, eesoxscsi_dma_setup() argument 161 struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; eesoxscsi_dma_setup() 162 struct device *dev = scsi_get_device(host); eesoxscsi_dma_setup() 353 eesoxscsi_dma_pseudo(struct Scsi_Host *host, struct scsi_pointer *SCp, eesoxscsi_dma_pseudo() argument 356 struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; eesoxscsi_dma_pseudo() 364 /* Prototype: int eesoxscsi_dma_stop(host, SCpnt) 366 * Params : host - host 370 eesoxscsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) eesoxscsi_dma_stop() argument 372 struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; eesoxscsi_dma_stop() 377 /* Prototype: const char *eesoxscsi_info(struct Scsi_Host * host) 379 * Params : host - driver host structure to return info for. 382 const char *eesoxscsi_info(struct Scsi_Host *host) eesoxscsi_info() argument 384 struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; eesoxscsi_info() 388 host->hostt->name, info->info.scsi.type, info->ec->slot_no, eesoxscsi_info() 394 /* Prototype: int eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) 396 * Params : host - host to setup 402 eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) eesoxscsi_set_proc_info() argument 412 eesoxscsi_terminator_ctl(host, 1); eesoxscsi_set_proc_info() 414 eesoxscsi_terminator_ctl(host, 0); eesoxscsi_set_proc_info() 425 static int eesoxscsi_show_info(struct seq_file *m, struct Scsi_Host *host) eesoxscsi_show_info() argument 429 info = (struct eesoxscsi_info *)host->hostdata; eesoxscsi_show_info() 444 struct Scsi_Host *host = ecard_get_drvdata(ec); eesoxscsi_show_term() local 445 struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; eesoxscsi_show_term() 453 struct Scsi_Host *host = ecard_get_drvdata(ec); eesoxscsi_store_term() local 454 struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; eesoxscsi_store_term() 458 spin_lock_irqsave(host->host_lock, flags); eesoxscsi_store_term() 465 spin_unlock_irqrestore(host->host_lock, flags); eesoxscsi_store_term() 496 struct Scsi_Host *host; eesoxscsi_probe() local 511 host = scsi_host_alloc(&eesox_template, eesoxscsi_probe() 513 if (!host) { eesoxscsi_probe() 518 ecard_set_drvdata(ec, host); eesoxscsi_probe() 520 info = (struct eesoxscsi_info *)host->hostdata; eesoxscsi_probe() 550 ret = fas216_init(host); eesoxscsi_probe() 557 host->host_no, ec->irq, ret); eesoxscsi_probe() 564 host->host_no, info->info.scsi.dma); eesoxscsi_probe() 573 ret = fas216_add(host, &ec->dev); eesoxscsi_probe() 579 free_irq(ec->irq, host); eesoxscsi_probe() 582 fas216_remove(host); eesoxscsi_probe() 586 scsi_host_put(host); eesoxscsi_probe() 597 struct Scsi_Host *host = ecard_get_drvdata(ec); eesoxscsi_remove() local 598 struct eesoxscsi_info *info = (struct eesoxscsi_info 
*)host->hostdata; eesoxscsi_remove() 601 fas216_remove(host); eesoxscsi_remove() 609 fas216_release(host); eesoxscsi_remove() 610 scsi_host_put(host); eesoxscsi_remove()
|
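In eesox.c above, eesoxscsi_terminator_ctl() takes the host's host_lock with interrupts disabled while it updates the cached control word and the card's control register, so the interrupt path never observes the two out of step. A minimal sketch of that pattern; the MY_TERM_BIT value, the info layout and the function name are placeholders, not the driver's.

#include <linux/io.h>
#include <linux/spinlock.h>
#include <scsi/scsi_host.h>

#define MY_TERM_BIT	0x80		/* assumed bit position, for illustration only */

struct my_term_info {
	void __iomem *ctrl_reg;		/* card control register */
	unsigned int control;		/* cached copy of its value */
};

static void my_terminator_ctl_sketch(struct Scsi_Host *host,
				     struct my_term_info *info, int on_off)
{
	unsigned long flags;

	spin_lock_irqsave(host->host_lock, flags);
	if (on_off)
		info->control |= MY_TERM_BIT;
	else
		info->control &= ~MY_TERM_BIT;
	writeb(info->control, info->ctrl_reg);	/* push the cached value to hardware */
	spin_unlock_irqrestore(host->host_lock, flags);
}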
H A D | cumana_2.c | 116 /* Prototype: void cumanascsi_2_terminator_ctl(host, on_off) 118 * Params : host - card to turn on/off 122 cumanascsi_2_terminator_ctl(struct Scsi_Host *host, int on_off) cumanascsi_2_terminator_ctl() argument 124 struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; cumanascsi_2_terminator_ctl() 148 /* Prototype: fasdmatype_t cumanascsi_2_dma_setup(host, SCpnt, direction, min_type) 150 * Params : host - host 157 cumanascsi_2_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, cumanascsi_2_dma_setup() argument 160 struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; cumanascsi_2_dma_setup() 161 struct device *dev = scsi_get_device(host); cumanascsi_2_dma_setup() 201 * Prototype: void cumanascsi_2_dma_pseudo(host, SCpnt, direction, transfer) 203 * Params : host - host 209 cumanascsi_2_dma_pseudo(struct Scsi_Host *host, struct scsi_pointer *SCp, cumanascsi_2_dma_pseudo() argument 212 struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; cumanascsi_2_dma_pseudo() 277 /* Prototype: int cumanascsi_2_dma_stop(host, SCpnt) 279 * Params : host - host 283 cumanascsi_2_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) cumanascsi_2_dma_stop() argument 285 struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; cumanascsi_2_dma_stop() 292 /* Prototype: const char *cumanascsi_2_info(struct Scsi_Host * host) 294 * Params : host - driver host structure to return info for. 297 const char *cumanascsi_2_info(struct Scsi_Host *host) cumanascsi_2_info() argument 299 struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; cumanascsi_2_info() 303 host->hostt->name, info->info.scsi.type, info->ec->slot_no, cumanascsi_2_info() 309 /* Prototype: int cumanascsi_2_set_proc_info(struct Scsi_Host *host, char *buffer, int length) 311 * Params : host - host to setup 317 cumanascsi_2_set_proc_info(struct Scsi_Host *host, char *buffer, int length) cumanascsi_2_set_proc_info() argument 327 cumanascsi_2_terminator_ctl(host, 1); cumanascsi_2_set_proc_info() 329 cumanascsi_2_terminator_ctl(host, 0); cumanascsi_2_set_proc_info() 340 static int cumanascsi_2_show_info(struct seq_file *m, struct Scsi_Host *host) cumanascsi_2_show_info() argument 343 info = (struct cumanascsi2_info *)host->hostdata; cumanascsi_2_show_info() 378 struct Scsi_Host *host; cumanascsi2_probe() local 393 host = scsi_host_alloc(&cumanascsi2_template, cumanascsi2_probe() 395 if (!host) { cumanascsi2_probe() 400 ecard_set_drvdata(ec, host); cumanascsi2_probe() 402 info = (struct cumanascsi2_info *)host->hostdata; cumanascsi2_probe() 406 cumanascsi_2_terminator_ctl(host, term[ec->slot_no]); cumanascsi2_probe() 429 ret = fas216_init(host); cumanascsi2_probe() 437 host->host_no, ec->irq, ret); cumanascsi2_probe() 444 host->host_no, info->info.scsi.dma); cumanascsi2_probe() 452 ret = fas216_add(host, &ec->dev); cumanascsi2_probe() 458 free_irq(ec->irq, host); cumanascsi2_probe() 461 fas216_release(host); cumanascsi2_probe() 464 scsi_host_put(host); cumanascsi2_probe() 475 struct Scsi_Host *host = ecard_get_drvdata(ec); cumanascsi2_remove() local 476 struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; cumanascsi2_remove() 479 fas216_remove(host); cumanascsi2_remove() 485 fas216_release(host); cumanascsi2_remove() 486 scsi_host_put(host); cumanascsi2_remove()
|
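cumana_2.c above (like powertec.c, eesox.c and arxescsi.c) also implements the show_info hook, which the SCSI core calls with a seq_file and the Scsi_Host so the driver can print its state. A minimal sketch of that hook shape; the fields printed are only examples, not what the driver reports.

#include <linux/seq_file.h>
#include <scsi/scsi_host.h>

/* Minimal sketch of a scsi_host_template .show_info handler. */
static int my_show_info_sketch(struct seq_file *m, struct Scsi_Host *host)
{
	seq_printf(m, "SCSI host %d, IRQ %d\n", host->host_no, host->irq);
	return 0;
}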
/linux-4.1.27/drivers/scsi/ |
H A D | initio.c | 28 * This is the Linux low-level SCSI driver for Initio INI-9X00U/UW SCSI host 147 static struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun); 148 static struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host); 150 static int tulip_main(struct initio_host * host); 152 static int initio_next_state(struct initio_host * host); 153 static int initio_state_1(struct initio_host * host); 154 static int initio_state_2(struct initio_host * host); 155 static int initio_state_3(struct initio_host * host); 156 static int initio_state_4(struct initio_host * host); 157 static int initio_state_5(struct initio_host * host); 158 static int initio_state_6(struct initio_host * host); 159 static int initio_state_7(struct initio_host * host); 160 static int initio_xfer_data_in(struct initio_host * host); 161 static int initio_xfer_data_out(struct initio_host * host); 162 static int initio_xpad_in(struct initio_host * host); 163 static int initio_xpad_out(struct initio_host * host); 164 static int initio_status_msg(struct initio_host * host); 166 static int initio_msgin(struct initio_host * host); 167 static int initio_msgin_sync(struct initio_host * host); 168 static int initio_msgin_accept(struct initio_host * host); 169 static int initio_msgout_reject(struct initio_host * host); 170 static int initio_msgin_extend(struct initio_host * host); 172 static int initio_msgout_ide(struct initio_host * host); 173 static int initio_msgout_abort_targ(struct initio_host * host); 174 static int initio_msgout_abort_tag(struct initio_host * host); 176 static int initio_bus_device_reset(struct initio_host * host); 177 static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb); 178 static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb); 179 static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb); 180 static int int_initio_busfree(struct initio_host * host); 181 static int int_initio_scsi_rst(struct initio_host * host); 182 static int int_initio_bad_seq(struct initio_host * host); 183 static int int_initio_resel(struct initio_host * host); 184 static int initio_sync_done(struct initio_host * host); 185 static int wdtr_done(struct initio_host * host); 186 static int wait_tulip(struct initio_host * host); 187 static int initio_wait_done_disc(struct initio_host * host); 188 static int initio_wait_disc(struct initio_host * host); 189 static void tulip_scsi(struct initio_host * host); 190 static int initio_post_scsi_rst(struct initio_host * host); 504 * Retrieve the host adapter configuration data from E2Prom. 
If the 532 * @host: InitIO we are stopping 537 static void initio_stop_bm(struct initio_host * host) initio_stop_bm() argument 540 if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */ initio_stop_bm() 541 outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd); initio_stop_bm() 543 while ((inb(host->addr + TUL_Int) & XABT) == 0) initio_stop_bm() 546 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); initio_stop_bm() 550 * initio_reset_scsi - Reset SCSI host controller 551 * @host: InitIO host to reset 557 static int initio_reset_scsi(struct initio_host * host, int seconds) initio_reset_scsi() argument 559 outb(TSC_RST_BUS, host->addr + TUL_SCtrl0); initio_reset_scsi() 561 while (!((host->jsint = inb(host->addr + TUL_SInt)) & TSS_SCSIRST_INT)) initio_reset_scsi() 565 outb(0, host->addr + TUL_SSignal); initio_reset_scsi() 572 inb(host->addr + TUL_SInt); initio_reset_scsi() 577 * initio_init - set up an InitIO host adapter 578 * @host: InitIO host adapter 582 * Set up the host adapter and devices according to the configuration 589 static void initio_init(struct initio_host * host, u8 *bios_addr) initio_init() argument 596 initio_read_eeprom(host->addr); initio_init() 598 host->max_tar = 8; initio_init() 600 host->max_tar = 16; initio_init() 602 host->config = i91unvramp->NVM_SCSIInfo[0].NVM_ChConfig1; initio_init() 604 host->scsi_id = i91unvramp->NVM_SCSIInfo[0].NVM_ChSCSIID; initio_init() 605 host->idmask = ~(1 << host->scsi_id); initio_init() 609 outb(inb(host->addr + TUL_PCMD) | 0x40, host->addr + TUL_PCMD); initio_init() 613 outb(0x1F, host->addr + TUL_Mask); initio_init() 615 initio_stop_bm(host); initio_init() 617 outb(TSC_RST_CHIP, host->addr + TUL_SCtrl0); initio_init() 620 outb(host->scsi_id << 4, host->addr + TUL_SScsiId); initio_init() 624 if (host->config & HCC_EN_PAR) initio_init() 625 host->sconf1 = (TSC_INITDEFAULT | TSC_EN_SCSI_PAR); initio_init() 627 host->sconf1 = (TSC_INITDEFAULT); initio_init() 628 outb(host->sconf1, host->addr + TUL_SConfig); initio_init() 631 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); initio_init() 633 outb(0, host->addr + TUL_SPeriod); initio_init() 636 outb(153, host->addr + TUL_STimeOut); initio_init() 639 outb((host->config & (HCC_ACT_TERM1 | HCC_ACT_TERM2)), initio_init() 640 host->addr + TUL_XCtrl); initio_init() 641 outb(((host->config & HCC_AUTO_TERM) >> 4) | initio_init() 642 (inb(host->addr + TUL_GCTRL1) & 0xFE), initio_init() 643 host->addr + TUL_GCTRL1); initio_init() 648 i < host->max_tar; initio_init() 650 host->targets[i].flags = *flags & ~(TCF_SYNC_DONE | TCF_WDTR_DONE); initio_init() 651 if (host->targets[i].flags & TCF_EN_255) initio_init() 652 host->targets[i].drv_flags = TCF_DRV_255_63; initio_init() 654 host->targets[i].drv_flags = 0; initio_init() 655 host->targets[i].js_period = 0; initio_init() 656 host->targets[i].sconfig0 = host->sconf1; initio_init() 657 host->targets[i].heads = *heads++; initio_init() 658 if (host->targets[i].heads == 255) initio_init() 659 host->targets[i].drv_flags = TCF_DRV_255_63; initio_init() 661 host->targets[i].drv_flags = 0; initio_init() 662 host->targets[i].sectors = *heads++; initio_init() 663 host->targets[i].flags &= ~TCF_BUSY; initio_init() 664 host->act_tags[i] = 0; initio_init() 665 host->max_tags[i] = 0xFF; initio_init() 668 host->addr, host->pci_dev->irq, initio_init() 669 host->bios_addr, host->scsi_id); initio_init() 671 if (host->config & HCC_SCSI_RESET) { initio_init() 673 initio_reset_scsi(host, 10); initio_init() 675 outb(0x17, host->addr + 
TUL_SCFG1); initio_init() 676 outb(0xE9, host->addr + TUL_SIntEnable); initio_init() 681 * @host: InitIO host we are allocating for 686 static struct scsi_ctrl_blk *initio_alloc_scb(struct initio_host *host) initio_alloc_scb() argument 691 spin_lock_irqsave(&host->avail_lock, flags); initio_alloc_scb() 692 if ((scb = host->first_avail) != NULL) { initio_alloc_scb() 696 if ((host->first_avail = scb->next) == NULL) initio_alloc_scb() 697 host->last_avail = NULL; initio_alloc_scb() 701 spin_unlock_irqrestore(&host->avail_lock, flags); initio_alloc_scb() 707 * @host: InitIO host that owns the SCB 710 * Return an allocated SCB to the host free list 713 static void initio_release_scb(struct initio_host * host, struct scsi_ctrl_blk * cmnd) initio_release_scb() argument 720 spin_lock_irqsave(&(host->avail_lock), flags); initio_release_scb() 724 if (host->last_avail != NULL) { initio_release_scb() 725 host->last_avail->next = cmnd; initio_release_scb() 726 host->last_avail = cmnd; initio_release_scb() 728 host->first_avail = cmnd; initio_release_scb() 729 host->last_avail = cmnd; initio_release_scb() 731 spin_unlock_irqrestore(&(host->avail_lock), flags); initio_release_scb() 735 static void initio_append_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp) initio_append_pend_scb() argument 743 if (host->last_pending != NULL) { initio_append_pend_scb() 744 host->last_pending->next = scbp; initio_append_pend_scb() 745 host->last_pending = scbp; initio_append_pend_scb() 747 host->first_pending = scbp; initio_append_pend_scb() 748 host->last_pending = scbp; initio_append_pend_scb() 753 static void initio_push_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp) initio_push_pend_scb() argument 760 if ((scbp->next = host->first_pending) != NULL) { initio_push_pend_scb() 761 host->first_pending = scbp; initio_push_pend_scb() 763 host->first_pending = scbp; initio_push_pend_scb() 764 host->last_pending = scbp; initio_push_pend_scb() 768 static struct scsi_ctrl_blk *initio_find_first_pend_scb(struct initio_host * host) initio_find_first_pend_scb() argument 773 first = host->first_pending; initio_find_first_pend_scb() 778 if ((host->act_tags[first->target] == 0) && initio_find_first_pend_scb() 779 !(host->targets[first->target].flags & TCF_BUSY)) initio_find_first_pend_scb() 782 if ((host->act_tags[first->target] >= initio_find_first_pend_scb() 783 host->max_tags[first->target]) | initio_find_first_pend_scb() 784 (host->targets[first->target].flags & TCF_BUSY)) { initio_find_first_pend_scb() 795 static void initio_unlink_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scb) initio_unlink_pend_scb() argument 803 prev = tmp = host->first_pending; initio_unlink_pend_scb() 806 if (tmp == host->first_pending) { initio_unlink_pend_scb() 807 if ((host->first_pending = tmp->next) == NULL) initio_unlink_pend_scb() 808 host->last_pending = NULL; initio_unlink_pend_scb() 811 if (tmp == host->last_pending) initio_unlink_pend_scb() 812 host->last_pending = prev; initio_unlink_pend_scb() 822 static void initio_append_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp) initio_append_busy_scb() argument 829 host->act_tags[scbp->target]++; initio_append_busy_scb() 831 host->targets[scbp->target].flags |= TCF_BUSY; initio_append_busy_scb() 834 if (host->last_busy != NULL) { initio_append_busy_scb() 835 host->last_busy->next = scbp; initio_append_busy_scb() 836 host->last_busy = scbp; initio_append_busy_scb() 838 host->first_busy = scbp; initio_append_busy_scb() 839 
host->last_busy = scbp; initio_append_busy_scb() 844 static struct scsi_ctrl_blk *initio_pop_busy_scb(struct initio_host * host) initio_pop_busy_scb() argument 849 if ((tmp = host->first_busy) != NULL) { initio_pop_busy_scb() 850 if ((host->first_busy = tmp->next) == NULL) initio_pop_busy_scb() 851 host->last_busy = NULL; initio_pop_busy_scb() 854 host->act_tags[tmp->target]--; initio_pop_busy_scb() 856 host->targets[tmp->target].flags &= ~TCF_BUSY; initio_pop_busy_scb() 865 static void initio_unlink_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scb) initio_unlink_busy_scb() argument 873 prev = tmp = host->first_busy; initio_unlink_busy_scb() 876 if (tmp == host->first_busy) { initio_unlink_busy_scb() 877 if ((host->first_busy = tmp->next) == NULL) initio_unlink_busy_scb() 878 host->last_busy = NULL; initio_unlink_busy_scb() 881 if (tmp == host->last_busy) initio_unlink_busy_scb() 882 host->last_busy = prev; initio_unlink_busy_scb() 886 host->act_tags[tmp->target]--; initio_unlink_busy_scb() 888 host->targets[tmp->target].flags &= ~TCF_BUSY; initio_unlink_busy_scb() 897 struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun) initio_find_busy_scb() argument 903 prev = tmp = host->first_busy; initio_find_busy_scb() 918 static void initio_append_done_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp) initio_append_done_scb() argument 926 if (host->last_done != NULL) { initio_append_done_scb() 927 host->last_done->next = scbp; initio_append_done_scb() 928 host->last_done = scbp; initio_append_done_scb() 930 host->first_done = scbp; initio_append_done_scb() 931 host->last_done = scbp; initio_append_done_scb() 935 struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host) initio_find_done_scb() argument 939 if ((tmp = host->first_done) != NULL) { initio_find_done_scb() 940 if ((host->first_done = tmp->next) == NULL) initio_find_done_scb() 941 host->last_done = NULL; initio_find_done_scb() 950 static int initio_abort_srb(struct initio_host * host, struct scsi_cmnd *srbp) initio_abort_srb() argument 955 spin_lock_irqsave(&host->semaph_lock, flags); initio_abort_srb() 957 if ((host->semaph == 0) && (host->active == NULL)) { initio_abort_srb() 959 outb(0x1F, host->addr + TUL_Mask); initio_abort_srb() 960 spin_unlock_irqrestore(&host->semaph_lock, flags); initio_abort_srb() 962 tulip_main(host); initio_abort_srb() 963 spin_lock_irqsave(&host->semaph_lock, flags); initio_abort_srb() 964 host->semaph = 1; initio_abort_srb() 965 outb(0x0F, host->addr + TUL_Mask); initio_abort_srb() 966 spin_unlock_irqrestore(&host->semaph_lock, flags); initio_abort_srb() 969 prev = tmp = host->first_pending; /* Check Pend queue */ initio_abort_srb() 973 if (tmp == host->active) { initio_abort_srb() 974 spin_unlock_irqrestore(&host->semaph_lock, flags); initio_abort_srb() 976 } else if (tmp == host->first_pending) { initio_abort_srb() 977 if ((host->first_pending = tmp->next) == NULL) initio_abort_srb() 978 host->last_pending = NULL; initio_abort_srb() 981 if (tmp == host->last_pending) initio_abort_srb() 982 host->last_pending = prev; initio_abort_srb() 987 (*tmp->post) ((u8 *) host, (u8 *) tmp); initio_abort_srb() 988 spin_unlock_irqrestore(&host->semaph_lock, flags); initio_abort_srb() 995 prev = tmp = host->first_busy; /* Check Busy queue */ initio_abort_srb() 998 if (tmp == host->active) { initio_abort_srb() 999 spin_unlock_irqrestore(&host->semaph_lock, flags); initio_abort_srb() 1002 spin_unlock_irqrestore(&host->semaph_lock, flags); initio_abort_srb() 
1005 host->act_tags[tmp->target]--; initio_abort_srb() 1006 if (tmp == host->first_busy) { initio_abort_srb() 1007 if ((host->first_busy = tmp->next) == NULL) initio_abort_srb() 1008 host->last_busy = NULL; initio_abort_srb() 1011 if (tmp == host->last_busy) initio_abort_srb() 1012 host->last_busy = prev; initio_abort_srb() 1020 (*tmp->post) ((u8 *) host, (u8 *) tmp); initio_abort_srb() 1021 spin_unlock_irqrestore(&host->semaph_lock, flags); initio_abort_srb() 1028 spin_unlock_irqrestore(&host->semaph_lock, flags); initio_abort_srb() 1033 static int initio_bad_seq(struct initio_host * host) initio_bad_seq() argument 1037 printk("initio_bad_seg c=%d\n", host->index); initio_bad_seq() 1039 if ((scb = host->active) != NULL) { initio_bad_seq() 1040 initio_unlink_busy_scb(host, scb); initio_bad_seq() 1043 initio_append_done_scb(host, scb); initio_bad_seq() 1045 initio_stop_bm(host); initio_bad_seq() 1046 initio_reset_scsi(host, 8); /* 7/29/98 */ initio_bad_seq() 1047 return initio_post_scsi_rst(host); initio_bad_seq() 1052 static void initio_exec_scb(struct initio_host * host, struct scsi_ctrl_blk * scb) initio_exec_scb() argument 1061 spin_lock_irqsave(&host->semaph_lock, flags); initio_exec_scb() 1063 initio_append_pend_scb(host, scb); /* Append this SCB to Pending queue */ initio_exec_scb() 1066 if (host->semaph == 1) { initio_exec_scb() 1068 outb(0x1F, host->addr + TUL_Mask); initio_exec_scb() 1069 host->semaph = 0; initio_exec_scb() 1070 spin_unlock_irqrestore(&host->semaph_lock, flags); initio_exec_scb() 1072 tulip_main(host); initio_exec_scb() 1074 spin_lock_irqsave(&host->semaph_lock, flags); initio_exec_scb() 1075 host->semaph = 1; initio_exec_scb() 1076 outb(0x0F, host->addr + TUL_Mask); initio_exec_scb() 1078 spin_unlock_irqrestore(&host->semaph_lock, flags); initio_exec_scb() 1083 static int initio_isr(struct initio_host * host) initio_isr() argument 1085 if (inb(host->addr + TUL_Int) & TSS_INT_PENDING) { initio_isr() 1086 if (host->semaph == 1) { initio_isr() 1087 outb(0x1F, host->addr + TUL_Mask); initio_isr() 1089 host->semaph = 0; initio_isr() 1091 tulip_main(host); initio_isr() 1093 host->semaph = 1; initio_isr() 1094 outb(0x0F, host->addr + TUL_Mask); initio_isr() 1101 static int tulip_main(struct initio_host * host) tulip_main() argument 1106 tulip_scsi(host); /* Call tulip_scsi */ tulip_main() 1109 while ((scb = initio_find_done_scb(host)) != NULL) { /* find done entry */ tulip_main() 1111 host->max_tags[scb->target] = tulip_main() 1112 host->act_tags[scb->target] - 1; tulip_main() 1114 initio_append_pend_scb(host, scb); tulip_main() 1143 initio_push_pend_scb(host, scb); tulip_main() 1158 (*scb->post) ((u8 *) host, (u8 *) scb); tulip_main() 1162 if (inb(host->addr + TUL_SStatus0) & TSS_INT_PENDING) tulip_main() 1164 if (host->active) /* return to OS and wait for xfer_done_ISR/Selected_ISR */ tulip_main() 1167 if (initio_find_first_pend_scb(host) == NULL) tulip_main() 1173 static void tulip_scsi(struct initio_host * host) tulip_scsi() argument 1179 if ((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING) { tulip_scsi() 1180 host->phase = host->jsstatus0 & TSS_PH_MASK; tulip_scsi() 1181 host->jsstatus1 = inb(host->addr + TUL_SStatus1); tulip_scsi() 1182 host->jsint = inb(host->addr + TUL_SInt); tulip_scsi() 1183 if (host->jsint & TSS_SCSIRST_INT) { /* SCSI bus reset detected */ tulip_scsi() 1184 int_initio_scsi_rst(host); tulip_scsi() 1187 if (host->jsint & TSS_RESEL_INT) { /* if selected/reselected interrupt */ tulip_scsi() 1188 if (int_initio_resel(host) == 
0) tulip_scsi() 1189 initio_next_state(host); tulip_scsi() 1192 if (host->jsint & TSS_SEL_TIMEOUT) { tulip_scsi() 1193 int_initio_busfree(host); tulip_scsi() 1196 if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */ tulip_scsi() 1197 int_initio_busfree(host); /* unexpected bus free or sel timeout */ tulip_scsi() 1200 if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) { /* func complete or Bus service */ tulip_scsi() 1201 if ((scb = host->active) != NULL) tulip_scsi() 1202 initio_next_state(host); tulip_scsi() 1206 if (host->active != NULL) tulip_scsi() 1209 if ((scb = initio_find_first_pend_scb(host)) == NULL) tulip_scsi() 1213 outb((host->scsi_id << 4) | (scb->target & 0x0F), tulip_scsi() 1214 host->addr + TUL_SScsiId); tulip_scsi() 1216 active_tc = &host->targets[scb->target]; tulip_scsi() 1223 outb(active_tc->js_period, host->addr + TUL_SPeriod); tulip_scsi() 1225 initio_select_atn_stop(host, scb); tulip_scsi() 1228 initio_select_atn_stop(host, scb); tulip_scsi() 1231 initio_select_atn3(host, scb); tulip_scsi() 1233 initio_select_atn(host, scb); tulip_scsi() 1237 while (wait_tulip(host) != -1) { tulip_scsi() 1238 if (initio_next_state(host) == -1) tulip_scsi() 1243 initio_select_atn_stop(host, scb); tulip_scsi() 1246 while (wait_tulip(host) != -1) { tulip_scsi() 1247 if (initio_next_state(host) == -1) tulip_scsi() 1252 if (initio_abort_srb(host, scb->srb) != 0) { tulip_scsi() 1253 initio_unlink_pend_scb(host, scb); tulip_scsi() 1254 initio_release_scb(host, scb); tulip_scsi() 1257 initio_select_atn_stop(host, scb); tulip_scsi() 1261 initio_unlink_pend_scb(host, scb); tulip_scsi() 1263 initio_append_done_scb(host, scb); tulip_scsi() 1270 * @host: InitIO host we are processing 1278 static int initio_next_state(struct initio_host * host) initio_next_state() argument 1282 next = host->active->next_state; initio_next_state() 1286 next = initio_state_1(host); initio_next_state() 1289 next = initio_state_2(host); initio_next_state() 1292 next = initio_state_3(host); initio_next_state() 1295 next = initio_state_4(host); initio_next_state() 1298 next = initio_state_5(host); initio_next_state() 1301 next = initio_state_6(host); initio_next_state() 1304 next = initio_state_7(host); initio_next_state() 1307 return initio_bus_device_reset(host); initio_next_state() 1309 return initio_bad_seq(host); initio_next_state() 1319 * @host: InitIO host we are controlling 1324 static int initio_state_1(struct initio_host * host) initio_state_1() argument 1326 struct scsi_ctrl_blk *scb = host->active; initio_state_1() 1327 struct target_control *active_tc = host->active_tc; initio_state_1() 1333 initio_unlink_pend_scb(host, scb); initio_state_1() 1334 initio_append_busy_scb(host, scb); initio_state_1() 1336 outb(active_tc->sconfig0, host->addr + TUL_SConfig ); initio_state_1() 1338 if (host->phase == MSG_OUT) { initio_state_1() 1339 outb(TSC_EN_BUS_IN | TSC_HW_RESELECT, host->addr + TUL_SCtrl1); initio_state_1() 1340 outb(scb->ident, host->addr + TUL_SFifo); initio_state_1() 1343 outb(scb->tagmsg, host->addr + TUL_SFifo); initio_state_1() 1344 outb(scb->tagid, host->addr + TUL_SFifo); initio_state_1() 1348 outb(MSG_EXTEND, host->addr + TUL_SFifo); initio_state_1() 1349 outb(2, host->addr + TUL_SFifo); /* Extended msg length */ initio_state_1() 1350 outb(3, host->addr + TUL_SFifo); /* Sync request */ initio_state_1() 1351 outb(1, host->addr + TUL_SFifo); /* Start from 16 bits */ initio_state_1() 1354 outb(MSG_EXTEND, host->addr + TUL_SFifo); initio_state_1() 1355 outb(3, host->addr + TUL_SFifo); /* extended 
msg length */ initio_state_1() 1356 outb(1, host->addr + TUL_SFifo); /* sync request */ initio_state_1() 1357 outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo); initio_state_1() 1358 outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */ initio_state_1() 1360 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_state_1() 1361 if (wait_tulip(host) == -1) initio_state_1() 1364 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); initio_state_1() 1365 outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal); initio_state_1() 1373 * @host: InitIO host we are controlling 1379 static int initio_state_2(struct initio_host * host) initio_state_2() argument 1381 struct scsi_ctrl_blk *scb = host->active; initio_state_2() 1382 struct target_control *active_tc = host->active_tc; initio_state_2() 1387 initio_unlink_pend_scb(host, scb); initio_state_2() 1388 initio_append_busy_scb(host, scb); initio_state_2() 1390 outb(active_tc->sconfig0, host->addr + TUL_SConfig); initio_state_2() 1392 if (host->jsstatus1 & TSS_CMD_PH_CMP) initio_state_2() 1395 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); initio_state_2() 1396 outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal); initio_state_2() 1403 * @host: InitIO host we are controlling 1408 static int initio_state_3(struct initio_host * host) initio_state_3() argument 1410 struct scsi_ctrl_blk *scb = host->active; initio_state_3() 1411 struct target_control *active_tc = host->active_tc; initio_state_3() 1418 switch (host->phase) { initio_state_3() 1421 outb(scb->cdb[i], host->addr + TUL_SFifo); initio_state_3() 1422 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_state_3() 1423 if (wait_tulip(host) == -1) initio_state_3() 1425 if (host->phase == CMD_OUT) initio_state_3() 1426 return initio_bad_seq(host); initio_state_3() 1431 if (initio_msgin(host) == -1) initio_state_3() 1436 if (initio_status_msg(host) == -1) initio_state_3() 1442 outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */ initio_state_3() 1443 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_state_3() 1444 if (wait_tulip(host) == -1) initio_state_3() 1449 outb(MSG_EXTEND, host->addr + TUL_SFifo); initio_state_3() 1450 outb(3, host->addr + TUL_SFifo); /* ext. 
msg len */ initio_state_3() 1451 outb(1, host->addr + TUL_SFifo); /* sync request */ initio_state_3() 1452 outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo); initio_state_3() 1453 outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */ initio_state_3() 1454 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_state_3() 1455 if (wait_tulip(host) == -1) initio_state_3() 1457 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); initio_state_3() 1458 outb(inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7), host->addr + TUL_SSignal); initio_state_3() 1463 return initio_bad_seq(host); initio_state_3() 1470 * @host: InitIO host we are controlling 1475 static int initio_state_4(struct initio_host * host) initio_state_4() argument 1477 struct scsi_ctrl_blk *scb = host->active; initio_state_4() 1489 switch (host->phase) { initio_state_4() 1494 if ((initio_status_msg(host)) == -1) initio_state_4() 1500 if (initio_msgin(host) == -1) initio_state_4() 1505 if (host->jsstatus0 & TSS_PAR_ERROR) { initio_state_4() 1508 if (initio_msgout_ide(host) == -1) initio_state_4() 1512 outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */ initio_state_4() 1513 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_state_4() 1514 if (wait_tulip(host) == -1) initio_state_4() 1520 return initio_xfer_data_in(host); initio_state_4() 1523 return initio_xfer_data_out(host); initio_state_4() 1526 return initio_bad_seq(host); initio_state_4() 1534 * @host: InitIO host we are controlling 1539 static int initio_state_5(struct initio_host * host) initio_state_5() argument 1541 struct scsi_ctrl_blk *scb = host->active; initio_state_5() 1548 cnt = inl(host->addr + TUL_SCnt0) & 0x0FFFFFF; initio_state_5() 1550 if (inb(host->addr + TUL_XCmd) & 0x20) { initio_state_5() 1553 if (host->jsstatus0 & TSS_PAR_ERROR) initio_state_5() 1555 if (inb(host->addr + TUL_XStatus) & XPEND) { /* DMA xfer pending, Send STOP */ initio_state_5() 1557 outb(inb(host->addr + TUL_XCtrl) | 0x80, host->addr + TUL_XCtrl); initio_state_5() 1559 while (inb(host->addr + TUL_XStatus) & XPEND) initio_state_5() 1564 if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0) { initio_state_5() 1565 if (host->active_tc->js_period & TSC_WIDE_SCSI) initio_state_5() 1566 cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F) << 1; initio_state_5() 1568 cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F); initio_state_5() 1570 if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */ initio_state_5() 1571 outb(TAX_X_ABT, host->addr + TUL_XCmd); initio_state_5() 1573 while ((inb(host->addr + TUL_Int) & XABT) == 0) initio_state_5() 1576 if ((cnt == 1) && (host->phase == DATA_OUT)) { initio_state_5() 1577 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_state_5() 1578 if (wait_tulip(host) == -1) initio_state_5() 1582 if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0) initio_state_5() 1583 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); initio_state_5() 1623 * @host: InitIO host we are controlling 1628 static int initio_state_6(struct initio_host * host) initio_state_6() argument 1630 struct scsi_ctrl_blk *scb = host->active; initio_state_6() 1636 switch (host->phase) { initio_state_6() 1638 if ((initio_status_msg(host)) == -1) initio_state_6() 1644 if ((initio_msgin(host)) == -1) initio_state_6() 1649 outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */ initio_state_6() 1650 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_state_6() 1651 if (wait_tulip(host) == -1) initio_state_6() 1656 return initio_xpad_in(host); 
initio_state_6() 1659 return initio_xpad_out(host); initio_state_6() 1662 return initio_bad_seq(host); initio_state_6() 1669 * @host: InitIO host we are controlling 1673 int initio_state_7(struct initio_host * host) initio_state_7() argument 1681 cnt = inb(host->addr + TUL_SFifoCnt) & 0x1F; initio_state_7() 1684 inb(host->addr + TUL_SFifo); initio_state_7() 1686 switch (host->phase) { initio_state_7() 1689 return initio_bad_seq(host); initio_state_7() 1697 * @host: InitIO host in use 1703 static int initio_xfer_data_in(struct initio_host * host) initio_xfer_data_in() argument 1705 struct scsi_ctrl_blk *scb = host->active; initio_xfer_data_in() 1710 outl(scb->buflen, host->addr + TUL_SCnt0); initio_xfer_data_in() 1711 outb(TSC_XF_DMA_IN, host->addr + TUL_SCmd); /* 7/25/95 */ initio_xfer_data_in() 1714 outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH); initio_xfer_data_in() 1715 outl(scb->bufptr, host->addr + TUL_XAddH); initio_xfer_data_in() 1716 outb(TAX_SG_IN, host->addr + TUL_XCmd); initio_xfer_data_in() 1718 outl(scb->buflen, host->addr + TUL_XCntH); initio_xfer_data_in() 1719 outl(scb->bufptr, host->addr + TUL_XAddH); initio_xfer_data_in() 1720 outb(TAX_X_IN, host->addr + TUL_XCmd); initio_xfer_data_in() 1728 * @host: InitIO host in use 1735 static int initio_xfer_data_out(struct initio_host * host) initio_xfer_data_out() argument 1737 struct scsi_ctrl_blk *scb = host->active; initio_xfer_data_out() 1742 outl(scb->buflen, host->addr + TUL_SCnt0); initio_xfer_data_out() 1743 outb(TSC_XF_DMA_OUT, host->addr + TUL_SCmd); initio_xfer_data_out() 1746 outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH); initio_xfer_data_out() 1747 outl(scb->bufptr, host->addr + TUL_XAddH); initio_xfer_data_out() 1748 outb(TAX_SG_OUT, host->addr + TUL_XCmd); initio_xfer_data_out() 1750 outl(scb->buflen, host->addr + TUL_XCntH); initio_xfer_data_out() 1751 outl(scb->bufptr, host->addr + TUL_XAddH); initio_xfer_data_out() 1752 outb(TAX_X_OUT, host->addr + TUL_XCmd); initio_xfer_data_out() 1759 int initio_xpad_in(struct initio_host * host) initio_xpad_in() argument 1761 struct scsi_ctrl_blk *scb = host->active; initio_xpad_in() 1762 struct target_control *active_tc = host->active_tc; initio_xpad_in() 1768 outl(2, host->addr + TUL_SCnt0); initio_xpad_in() 1770 outl(1, host->addr + TUL_SCnt0); initio_xpad_in() 1772 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); initio_xpad_in() 1773 if (wait_tulip(host) == -1) initio_xpad_in() 1775 if (host->phase != DATA_IN) { initio_xpad_in() 1776 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); initio_xpad_in() 1779 inb(host->addr + TUL_SFifo); initio_xpad_in() 1783 int initio_xpad_out(struct initio_host * host) initio_xpad_out() argument 1785 struct scsi_ctrl_blk *scb = host->active; initio_xpad_out() 1786 struct target_control *active_tc = host->active_tc; initio_xpad_out() 1792 outl(2, host->addr + TUL_SCnt0); initio_xpad_out() 1794 outl(1, host->addr + TUL_SCnt0); initio_xpad_out() 1796 outb(0, host->addr + TUL_SFifo); initio_xpad_out() 1797 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_xpad_out() 1798 if ((wait_tulip(host)) == -1) initio_xpad_out() 1800 if (host->phase != DATA_OUT) { /* Disable wide CPU to allow read 16 bits */ initio_xpad_out() 1801 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); initio_xpad_out() 1802 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); initio_xpad_out() 1808 int initio_status_msg(struct initio_host * host) initio_status_msg() argument 1810 struct scsi_ctrl_blk *scb = host->active; initio_status_msg() 1813 outb(TSC_CMD_COMP, 
host->addr + TUL_SCmd); initio_status_msg() 1814 if (wait_tulip(host) == -1) initio_status_msg() 1818 scb->tastat = inb(host->addr + TUL_SFifo); initio_status_msg() 1820 if (host->phase == MSG_OUT) { initio_status_msg() 1821 if (host->jsstatus0 & TSS_PAR_ERROR) initio_status_msg() 1822 outb(MSG_PARITY, host->addr + TUL_SFifo); initio_status_msg() 1824 outb(MSG_NOP, host->addr + TUL_SFifo); initio_status_msg() 1825 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_status_msg() 1826 return wait_tulip(host); initio_status_msg() 1828 if (host->phase == MSG_IN) { initio_status_msg() 1829 msg = inb(host->addr + TUL_SFifo); initio_status_msg() 1830 if (host->jsstatus0 & TSS_PAR_ERROR) { /* Parity error */ initio_status_msg() 1831 if ((initio_msgin_accept(host)) == -1) initio_status_msg() 1833 if (host->phase != MSG_OUT) initio_status_msg() 1834 return initio_bad_seq(host); initio_status_msg() 1835 outb(MSG_PARITY, host->addr + TUL_SFifo); initio_status_msg() 1836 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_status_msg() 1837 return wait_tulip(host); initio_status_msg() 1842 return initio_bad_seq(host); initio_status_msg() 1843 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); initio_status_msg() 1844 outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd); initio_status_msg() 1845 return initio_wait_done_disc(host); initio_status_msg() 1850 return initio_msgin_accept(host); initio_status_msg() 1853 return initio_bad_seq(host); initio_status_msg() 1858 int int_initio_busfree(struct initio_host * host) int_initio_busfree() argument 1860 struct scsi_ctrl_blk *scb = host->active; int_initio_busfree() 1864 initio_unlink_pend_scb(host, scb); int_initio_busfree() 1866 initio_append_done_scb(host, scb); int_initio_busfree() 1868 initio_unlink_busy_scb(host, scb); int_initio_busfree() 1870 initio_append_done_scb(host, scb); int_initio_busfree() 1872 host->active = NULL; int_initio_busfree() 1873 host->active_tc = NULL; int_initio_busfree() 1875 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */ int_initio_busfree() 1876 outb(TSC_INITDEFAULT, host->addr + TUL_SConfig); int_initio_busfree() 1877 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */ int_initio_busfree() 1884 * @host: Host seeing the reset 1892 static int int_initio_scsi_rst(struct initio_host * host) int_initio_scsi_rst() argument 1898 if (inb(host->addr + TUL_XStatus) & 0x01) { int_initio_scsi_rst() 1899 outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd); int_initio_scsi_rst() 1901 while ((inb(host->addr + TUL_Int) & 0x04) == 0) int_initio_scsi_rst() 1903 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); int_initio_scsi_rst() 1906 while ((scb = initio_pop_busy_scb(host)) != NULL) { int_initio_scsi_rst() 1908 initio_append_done_scb(host, scb); int_initio_scsi_rst() 1910 host->active = NULL; int_initio_scsi_rst() 1911 host->active_tc = NULL; int_initio_scsi_rst() 1914 for (i = 0; i < host->max_tar; i++) int_initio_scsi_rst() 1915 host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE); int_initio_scsi_rst() 1921 * @host: InitIO host adapter 1928 int int_initio_resel(struct initio_host * host) int_initio_resel() argument 1935 if ((scb = host->active) != NULL) { int_initio_resel() 1939 host->active = NULL; int_initio_resel() 1942 tar = inb(host->addr + TUL_SBusId); int_initio_resel() 1944 lun = inb(host->addr + TUL_SIdent) & 0x0F; int_initio_resel() 1946 active_tc = &host->targets[tar]; int_initio_resel() 1947 host->active_tc = active_tc; int_initio_resel() 1948 outb(active_tc->sconfig0, host->addr + 
TUL_SConfig); int_initio_resel() 1949 outb(active_tc->js_period, host->addr + TUL_SPeriod); int_initio_resel() 1953 if ((initio_msgin_accept(host)) == -1) int_initio_resel() 1955 if (host->phase != MSG_IN) int_initio_resel() 1957 outl(1, host->addr + TUL_SCnt0); int_initio_resel() 1958 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); int_initio_resel() 1959 if (wait_tulip(host) == -1) int_initio_resel() 1961 msg = inb(host->addr + TUL_SFifo); /* Read Tag Message */ int_initio_resel() 1966 if (initio_msgin_accept(host) == -1) int_initio_resel() 1969 if (host->phase != MSG_IN) int_initio_resel() 1972 outl(1, host->addr + TUL_SCnt0); int_initio_resel() 1973 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); int_initio_resel() 1974 if (wait_tulip(host) == -1) int_initio_resel() 1976 tag = inb(host->addr + TUL_SFifo); /* Read Tag ID */ int_initio_resel() 1977 scb = host->scb + tag; int_initio_resel() 1979 return initio_msgout_abort_tag(host); int_initio_resel() 1982 return initio_msgout_abort_tag(host); int_initio_resel() 1984 host->active = scb; int_initio_resel() 1985 if ((initio_msgin_accept(host)) == -1) int_initio_resel() 1989 if ((scb = initio_find_busy_scb(host, tar | (lun << 8))) == NULL) { int_initio_resel() 1990 return initio_msgout_abort_targ(host); int_initio_resel() 1992 host->active = scb; int_initio_resel() 1994 if ((initio_msgin_accept(host)) == -1) int_initio_resel() 2003 * @host: InitIO host flagging event 2005 * We have ended up out of phase somehow. Reset the host controller 2009 static int int_initio_bad_seq(struct initio_host * host) int_initio_bad_seq() argument 2014 initio_reset_scsi(host, 10); int_initio_bad_seq() 2016 while ((scb = initio_pop_busy_scb(host)) != NULL) { int_initio_bad_seq() 2018 initio_append_done_scb(host, scb); int_initio_bad_seq() 2020 for (i = 0; i < host->max_tar; i++) int_initio_bad_seq() 2021 host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE); int_initio_bad_seq() 2028 * @host: InitIO host 2034 static int initio_msgout_abort_targ(struct initio_host * host) initio_msgout_abort_targ() argument 2037 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal); initio_msgout_abort_targ() 2038 if (initio_msgin_accept(host) == -1) initio_msgout_abort_targ() 2040 if (host->phase != MSG_OUT) initio_msgout_abort_targ() 2041 return initio_bad_seq(host); initio_msgout_abort_targ() 2043 outb(MSG_ABORT, host->addr + TUL_SFifo); initio_msgout_abort_targ() 2044 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_msgout_abort_targ() 2046 return initio_wait_disc(host); initio_msgout_abort_targ() 2051 * @host: InitIO host 2057 static int initio_msgout_abort_tag(struct initio_host * host) initio_msgout_abort_tag() argument 2060 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal); initio_msgout_abort_tag() 2061 if (initio_msgin_accept(host) == -1) initio_msgout_abort_tag() 2063 if (host->phase != MSG_OUT) initio_msgout_abort_tag() 2064 return initio_bad_seq(host); initio_msgout_abort_tag() 2066 outb(MSG_ABORT_TAG, host->addr + TUL_SFifo); initio_msgout_abort_tag() 2067 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_msgout_abort_tag() 2069 return initio_wait_disc(host); initio_msgout_abort_tag() 2075 * @host: InitIO Host 2079 static int initio_msgin(struct initio_host * host) initio_msgin() argument 2084 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); initio_msgin() 2086 outl(1, host->addr + TUL_SCnt0); initio_msgin() 2087 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); 
initio_msgin() 2088 if (wait_tulip(host) == -1) initio_msgin() 2091 switch (inb(host->addr + TUL_SFifo)) { initio_msgin() 2093 outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd); initio_msgin() 2094 return initio_wait_disc(host); initio_msgin() 2098 initio_msgin_accept(host); initio_msgin() 2101 outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), initio_msgin() 2102 host->addr + TUL_SSignal); initio_msgin() 2103 active_tc = host->active_tc; initio_msgin() 2105 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), initio_msgin() 2106 host->addr + TUL_SSignal); initio_msgin() 2107 initio_msgin_accept(host); initio_msgin() 2110 initio_msgin_extend(host); initio_msgin() 2113 initio_msgin_accept(host); initio_msgin() 2116 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); initio_msgin() 2117 outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd); initio_msgin() 2118 return initio_wait_done_disc(host); initio_msgin() 2120 initio_msgout_reject(host); initio_msgin() 2123 if (host->phase != MSG_IN) initio_msgin() 2124 return host->phase; initio_msgin() 2129 static int initio_msgout_reject(struct initio_host * host) initio_msgout_reject() argument 2131 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal); initio_msgout_reject() 2133 if (initio_msgin_accept(host) == -1) initio_msgout_reject() 2136 if (host->phase == MSG_OUT) { initio_msgout_reject() 2137 outb(MSG_REJ, host->addr + TUL_SFifo); /* Msg reject */ initio_msgout_reject() 2138 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_msgout_reject() 2139 return wait_tulip(host); initio_msgout_reject() 2141 return host->phase; initio_msgout_reject() 2144 static int initio_msgout_ide(struct initio_host * host) initio_msgout_ide() argument 2146 outb(MSG_IDE, host->addr + TUL_SFifo); /* Initiator Detected Error */ initio_msgout_ide() 2147 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_msgout_ide() 2148 return wait_tulip(host); initio_msgout_ide() 2151 static int initio_msgin_extend(struct initio_host * host) initio_msgin_extend() argument 2155 if (initio_msgin_accept(host) != MSG_IN) initio_msgin_extend() 2156 return host->phase; initio_msgin_extend() 2159 outl(1, host->addr + TUL_SCnt0); initio_msgin_extend() 2160 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); initio_msgin_extend() 2161 if (wait_tulip(host) == -1) initio_msgin_extend() 2164 len = inb(host->addr + TUL_SFifo); initio_msgin_extend() 2165 host->msg[0] = len; initio_msgin_extend() 2168 if ((initio_msgin_accept(host)) != MSG_IN) initio_msgin_extend() 2169 return host->phase; initio_msgin_extend() 2170 outl(1, host->addr + TUL_SCnt0); initio_msgin_extend() 2171 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); initio_msgin_extend() 2172 if (wait_tulip(host) == -1) initio_msgin_extend() 2174 host->msg[idx++] = inb(host->addr + TUL_SFifo); initio_msgin_extend() 2176 if (host->msg[1] == 1) { /* if it's synchronous data transfer request */ initio_msgin_extend() 2178 if (host->msg[0] != 3) /* if length is not right */ initio_msgin_extend() 2179 return initio_msgout_reject(host); initio_msgin_extend() 2180 if (host->active_tc->flags & TCF_NO_SYNC_NEGO) { /* Set OFFSET=0 to do async, nego back */ initio_msgin_extend() 2181 host->msg[3] = 0; initio_msgin_extend() 2183 if (initio_msgin_sync(host) == 0 && initio_msgin_extend() 2184 (host->active_tc->flags & TCF_SYNC_DONE)) { initio_msgin_extend() 2185 initio_sync_done(host); initio_msgin_extend() 2186 return initio_msgin_accept(host); initio_msgin_extend() 2190 r = inb(host->addr + TUL_SSignal); 
initio_msgin_extend() 2192 host->addr + TUL_SSignal); initio_msgin_extend() 2193 if (initio_msgin_accept(host) != MSG_OUT) initio_msgin_extend() 2194 return host->phase; initio_msgin_extend() 2196 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); initio_msgin_extend() 2198 initio_sync_done(host); initio_msgin_extend() 2200 outb(MSG_EXTEND, host->addr + TUL_SFifo); initio_msgin_extend() 2201 outb(3, host->addr + TUL_SFifo); initio_msgin_extend() 2202 outb(1, host->addr + TUL_SFifo); initio_msgin_extend() 2203 outb(host->msg[2], host->addr + TUL_SFifo); initio_msgin_extend() 2204 outb(host->msg[3], host->addr + TUL_SFifo); initio_msgin_extend() 2205 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_msgin_extend() 2206 return wait_tulip(host); initio_msgin_extend() 2208 if (host->msg[0] != 2 || host->msg[1] != 3) initio_msgin_extend() 2209 return initio_msgout_reject(host); initio_msgin_extend() 2211 if (host->active_tc->flags & TCF_NO_WDTR) { initio_msgin_extend() 2212 host->msg[2] = 0; initio_msgin_extend() 2214 if (host->msg[2] > 2) /* > 32 bits */ initio_msgin_extend() 2215 return initio_msgout_reject(host); initio_msgin_extend() 2216 if (host->msg[2] == 2) { /* == 32 */ initio_msgin_extend() 2217 host->msg[2] = 1; initio_msgin_extend() 2219 if ((host->active_tc->flags & TCF_NO_WDTR) == 0) { initio_msgin_extend() 2220 wdtr_done(host); initio_msgin_extend() 2221 if ((host->active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) initio_msgin_extend() 2222 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal); initio_msgin_extend() 2223 return initio_msgin_accept(host); initio_msgin_extend() 2227 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal); initio_msgin_extend() 2229 if (initio_msgin_accept(host) != MSG_OUT) initio_msgin_extend() 2230 return host->phase; initio_msgin_extend() 2232 outb(MSG_EXTEND, host->addr + TUL_SFifo); initio_msgin_extend() 2233 outb(2, host->addr + TUL_SFifo); initio_msgin_extend() 2234 outb(3, host->addr + TUL_SFifo); initio_msgin_extend() 2235 outb(host->msg[2], host->addr + TUL_SFifo); initio_msgin_extend() 2236 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_msgin_extend() 2237 return wait_tulip(host); initio_msgin_extend() 2240 static int initio_msgin_sync(struct initio_host * host) initio_msgin_sync() argument 2244 default_period = initio_rate_tbl[host->active_tc->flags & TCF_SCSI_RATE]; initio_msgin_sync() 2245 if (host->msg[3] > MAX_OFFSET) { initio_msgin_sync() 2246 host->msg[3] = MAX_OFFSET; initio_msgin_sync() 2247 if (host->msg[2] < default_period) { initio_msgin_sync() 2248 host->msg[2] = default_period; initio_msgin_sync() 2251 if (host->msg[2] >= 59) /* Change to async */ initio_msgin_sync() 2252 host->msg[3] = 0; initio_msgin_sync() 2256 if (host->msg[3] == 0) { initio_msgin_sync() 2259 if (host->msg[2] < default_period) { initio_msgin_sync() 2260 host->msg[2] = default_period; initio_msgin_sync() 2263 if (host->msg[2] >= 59) { initio_msgin_sync() 2264 host->msg[3] = 0; initio_msgin_sync() 2270 static int wdtr_done(struct initio_host * host) wdtr_done() argument 2272 host->active_tc->flags &= ~TCF_SYNC_DONE; wdtr_done() 2273 host->active_tc->flags |= TCF_WDTR_DONE; wdtr_done() 2275 host->active_tc->js_period = 0; wdtr_done() 2276 if (host->msg[2]) /* if 16 bit */ wdtr_done() 2277 host->active_tc->js_period |= TSC_WIDE_SCSI; wdtr_done() 2278 host->active_tc->sconfig0 &= ~TSC_ALT_PERIOD; wdtr_done() 2279 outb(host->active_tc->sconfig0, host->addr 
+ TUL_SConfig); wdtr_done() 2280 outb(host->active_tc->js_period, host->addr + TUL_SPeriod); wdtr_done() 2285 static int initio_sync_done(struct initio_host * host) initio_sync_done() argument 2289 host->active_tc->flags |= TCF_SYNC_DONE; initio_sync_done() 2291 if (host->msg[3]) { initio_sync_done() 2292 host->active_tc->js_period |= host->msg[3]; initio_sync_done() 2294 if (initio_rate_tbl[i] >= host->msg[2]) /* pick the big one */ initio_sync_done() 2297 host->active_tc->js_period |= (i << 4); initio_sync_done() 2298 host->active_tc->sconfig0 |= TSC_ALT_PERIOD; initio_sync_done() 2300 outb(host->active_tc->sconfig0, host->addr + TUL_SConfig); initio_sync_done() 2301 outb(host->active_tc->js_period, host->addr + TUL_SPeriod); initio_sync_done() 2307 static int initio_post_scsi_rst(struct initio_host * host) initio_post_scsi_rst() argument 2313 host->active = NULL; initio_post_scsi_rst() 2314 host->active_tc = NULL; initio_post_scsi_rst() 2315 host->flags = 0; initio_post_scsi_rst() 2317 while ((scb = initio_pop_busy_scb(host)) != NULL) { initio_post_scsi_rst() 2319 initio_append_done_scb(host, scb); initio_post_scsi_rst() 2322 active_tc = &host->targets[0]; initio_post_scsi_rst() 2323 for (i = 0; i < host->max_tar; active_tc++, i++) { initio_post_scsi_rst() 2327 active_tc->sconfig0 = host->sconf1; initio_post_scsi_rst() 2328 host->act_tags[0] = 0; /* 07/22/98 */ initio_post_scsi_rst() 2329 host->targets[i].flags &= ~TCF_BUSY; /* 07/22/98 */ initio_post_scsi_rst() 2335 static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb) initio_select_atn_stop() argument 2339 host->active = scb; initio_select_atn_stop() 2340 host->active_tc = &host->targets[scb->target]; initio_select_atn_stop() 2341 outb(TSC_SELATNSTOP, host->addr + TUL_SCmd); initio_select_atn_stop() 2345 static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb) initio_select_atn() argument 2352 outb(scb->ident, host->addr + TUL_SFifo); initio_select_atn() 2354 outb(scb->cdb[i], host->addr + TUL_SFifo); initio_select_atn() 2355 host->active_tc = &host->targets[scb->target]; initio_select_atn() 2356 host->active = scb; initio_select_atn() 2357 outb(TSC_SEL_ATN, host->addr + TUL_SCmd); initio_select_atn() 2360 static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb) initio_select_atn3() argument 2367 outb(scb->ident, host->addr + TUL_SFifo); initio_select_atn3() 2368 outb(scb->tagmsg, host->addr + TUL_SFifo); initio_select_atn3() 2369 outb(scb->tagid, host->addr + TUL_SFifo); initio_select_atn3() 2371 outb(scb->cdb[i], host->addr + TUL_SFifo); initio_select_atn3() 2372 host->active_tc = &host->targets[scb->target]; initio_select_atn3() 2373 host->active = scb; initio_select_atn3() 2374 outb(TSC_SEL_ATN3, host->addr + TUL_SCmd); initio_select_atn3() 2379 * @host: InitIO host to reset 2384 int initio_bus_device_reset(struct initio_host * host) initio_bus_device_reset() argument 2386 struct scsi_ctrl_blk *scb = host->active; initio_bus_device_reset() 2387 struct target_control *active_tc = host->active_tc; initio_bus_device_reset() 2391 if (host->phase != MSG_OUT) initio_bus_device_reset() 2392 return int_initio_bad_seq(host); /* Unexpected phase */ initio_bus_device_reset() 2394 initio_unlink_pend_scb(host, scb); initio_bus_device_reset() 2395 initio_release_scb(host, scb); initio_bus_device_reset() 2403 prev = tmp = host->first_busy; /* Check Busy queue */ initio_bus_device_reset() 2407 if (tmp == host->first_busy) { initio_bus_device_reset() 2408 
if ((host->first_busy = tmp->next) == NULL) initio_bus_device_reset() 2409 host->last_busy = NULL; initio_bus_device_reset() 2412 if (tmp == host->last_busy) initio_bus_device_reset() 2413 host->last_busy = prev; initio_bus_device_reset() 2416 initio_append_done_scb(host, tmp); initio_bus_device_reset() 2424 outb(MSG_DEVRST, host->addr + TUL_SFifo); initio_bus_device_reset() 2425 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); initio_bus_device_reset() 2426 return initio_wait_disc(host); initio_bus_device_reset() 2430 static int initio_msgin_accept(struct initio_host * host) initio_msgin_accept() argument 2432 outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd); initio_msgin_accept() 2433 return wait_tulip(host); initio_msgin_accept() 2436 static int wait_tulip(struct initio_host * host) wait_tulip() argument 2439 while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) wait_tulip() 2443 host->jsint = inb(host->addr + TUL_SInt); wait_tulip() 2444 host->phase = host->jsstatus0 & TSS_PH_MASK; wait_tulip() 2445 host->jsstatus1 = inb(host->addr + TUL_SStatus1); wait_tulip() 2447 if (host->jsint & TSS_RESEL_INT) /* if SCSI bus reset detected */ wait_tulip() 2448 return int_initio_resel(host); wait_tulip() 2449 if (host->jsint & TSS_SEL_TIMEOUT) /* if selected/reselected timeout interrupt */ wait_tulip() 2450 return int_initio_busfree(host); wait_tulip() 2451 if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */ wait_tulip() 2452 return int_initio_scsi_rst(host); wait_tulip() 2454 if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */ wait_tulip() 2455 if (host->flags & HCF_EXPECT_DONE_DISC) { wait_tulip() 2456 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */ wait_tulip() 2457 initio_unlink_busy_scb(host, host->active); wait_tulip() 2458 host->active->hastat = 0; wait_tulip() 2459 initio_append_done_scb(host, host->active); wait_tulip() 2460 host->active = NULL; wait_tulip() 2461 host->active_tc = NULL; wait_tulip() 2462 host->flags &= ~HCF_EXPECT_DONE_DISC; wait_tulip() 2463 outb(TSC_INITDEFAULT, host->addr + TUL_SConfig); wait_tulip() 2464 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */ wait_tulip() 2467 if (host->flags & HCF_EXPECT_DISC) { wait_tulip() 2468 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */ wait_tulip() 2469 host->active = NULL; wait_tulip() 2470 host->active_tc = NULL; wait_tulip() 2471 host->flags &= ~HCF_EXPECT_DISC; wait_tulip() 2472 outb(TSC_INITDEFAULT, host->addr + TUL_SConfig); wait_tulip() 2473 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */ wait_tulip() 2476 return int_initio_busfree(host); wait_tulip() 2479 if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) wait_tulip() 2480 return host->phase; wait_tulip() 2481 return host->phase; wait_tulip() 2484 static int initio_wait_disc(struct initio_host * host) initio_wait_disc() argument 2486 while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING)) initio_wait_disc() 2489 host->jsint = inb(host->addr + TUL_SInt); initio_wait_disc() 2491 if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */ initio_wait_disc() 2492 return int_initio_scsi_rst(host); initio_wait_disc() 2493 if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */ initio_wait_disc() 2494 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */ initio_wait_disc() 2495 outb(TSC_INITDEFAULT, host->addr + TUL_SConfig); initio_wait_disc() 2496 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */ 
initio_wait_disc() 2497 host->active = NULL; initio_wait_disc() 2500 return initio_bad_seq(host); initio_wait_disc() 2503 static int initio_wait_done_disc(struct initio_host * host) initio_wait_done_disc() argument 2505 while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) initio_wait_done_disc() 2509 host->jsint = inb(host->addr + TUL_SInt); initio_wait_done_disc() 2511 if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */ initio_wait_done_disc() 2512 return int_initio_scsi_rst(host); initio_wait_done_disc() 2513 if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */ initio_wait_done_disc() 2514 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */ initio_wait_done_disc() 2515 outb(TSC_INITDEFAULT, host->addr + TUL_SConfig); initio_wait_done_disc() 2516 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */ initio_wait_done_disc() 2517 initio_unlink_busy_scb(host, host->active); initio_wait_done_disc() 2519 initio_append_done_scb(host, host->active); initio_wait_done_disc() 2520 host->active = NULL; initio_wait_done_disc() 2523 return initio_bad_seq(host); initio_wait_done_disc() 2553 * @host: InitIO host taking the command 2558 * suitable for feeding to the InitIO host controller. This also requires 2562 static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * cblk, struct scsi_cmnd * cmnd) initio_build_scb() argument 2582 dma_addr = dma_map_single(&host->pci_dev->dev, cmnd->sense_buffer, initio_build_scb() 2606 dma_addr = dma_map_single(&host->pci_dev->dev, &cblk->sglist[0], initio_build_scb() 2637 * Attempts to queue a new command with the host adapter. Will return 2638 * zero if successful or indicate a host busy condition if not (which 2645 struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata; i91u_queuecommand_lck() local 2650 cmnd = initio_alloc_scb(host); i91u_queuecommand_lck() 2654 initio_build_scb(host, cmnd, cmd); i91u_queuecommand_lck() 2655 initio_exec_scb(host, cmnd); i91u_queuecommand_lck() 2670 struct initio_host *host; i91u_bus_reset() local 2672 host = (struct initio_host *) cmnd->device->host->hostdata; i91u_bus_reset() 2674 spin_lock_irq(cmnd->device->host->host_lock); i91u_bus_reset() 2675 initio_reset_scsi(host, 0); i91u_bus_reset() 2676 spin_unlock_irq(cmnd->device->host->host_lock); i91u_bus_reset() 2688 * Map the device geometry in a manner compatible with the host 2697 struct initio_host *host; /* Point to Host adapter control block */ i91u_biosparam() local 2700 host = (struct initio_host *) sdev->host->hostdata; i91u_biosparam() 2701 tc = &host->targets[sdev->id]; i91u_biosparam() 2761 * @host: Pointer to host adapter control block. 2771 struct initio_host *host; i91uSCBPost() local 2774 host = (struct initio_host *) host_mem; i91uSCBPost() 2779 initio_release_scb(host, cblk); /* Release SCB for current channel */ i91uSCBPost() 2799 phase sequence was requested by the target. 
The host adapter i91uSCBPost() 2800 will generate a SCSI Reset Condition, notifying the host with i91uSCBPost() 2822 i91u_unmap_scb(host->pci_dev, cmnd); i91uSCBPost() 2824 initio_release_scb(host, cblk); /* Release SCB for current channel */ i91uSCBPost() 2844 struct initio_host *host; initio_probe_one() local 2867 printk(KERN_WARNING "initio: Could not allocate host structure.\n"); initio_probe_one() 2871 host = (struct initio_host *)shost->hostdata; initio_probe_one() 2872 memset(host, 0, sizeof(struct initio_host)); initio_probe_one() 2873 host->addr = pci_resource_start(pdev, 0); initio_probe_one() 2874 host->bios_addr = bios_seg; initio_probe_one() 2876 if (!request_region(host->addr, 256, "i91u")) { initio_probe_one() 2877 printk(KERN_WARNING "initio: I/O port range 0x%x is busy.\n", host->addr); initio_probe_one() 2899 host->pci_dev = pdev; initio_probe_one() 2901 host->semaph = 1; initio_probe_one() 2902 spin_lock_init(&host->semaph_lock); initio_probe_one() 2903 host->num_scbs = num_scb; initio_probe_one() 2904 host->scb = scb; initio_probe_one() 2905 host->next_pending = scb; initio_probe_one() 2906 host->next_avail = scb; initio_probe_one() 2914 host->scb_end = tmp; initio_probe_one() 2915 host->first_avail = scb; initio_probe_one() 2916 host->last_avail = prev; initio_probe_one() 2917 spin_lock_init(&host->avail_lock); initio_probe_one() 2919 initio_init(host, phys_to_virt(((u32)bios_seg << 4))); initio_probe_one() 2921 host->jsstatus0 = 0; initio_probe_one() 2923 shost->io_port = host->addr; initio_probe_one() 2926 shost->unique_id = host->addr; initio_probe_one() 2927 shost->max_id = host->max_tar; initio_probe_one() 2930 shost->this_id = host->scsi_id; /* Assign HCS index */ initio_probe_one() 2931 shost->base = host->addr; initio_probe_one() 2950 kfree(host->scb); initio_probe_one() 2952 release_region(host->addr, 256); initio_probe_one() 2970 struct Scsi_Host *host = pci_get_drvdata(pdev); initio_remove_one() local 2971 struct initio_host *s = (struct initio_host *)host->hostdata; initio_remove_one() 2972 scsi_remove_host(host); initio_remove_one() 2973 free_irq(pdev->irq, host); initio_remove_one() 2975 scsi_host_put(host); initio_remove_one()
|
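The tulip_scsi()/initio_next_state() code above is a table-driven bus state machine: each initio_state_N() handler services one step of the SCSI transaction and returns the number of the state to run next, or a stop code once the sequence completes or goes wrong. A minimal standalone sketch of that dispatch shape, using invented demo_* names rather than the driver's own symbols:

/*
 * Sketch only: models the "handler returns the next state" loop seen in
 * initio_next_state().  0 means finished cleanly, -1 means give up.
 */
#include <stdio.h>

struct demo_host {
        int next_state;                         /* state to resume at */
};

static int demo_state_1(struct demo_host *h) { (void)h; return 2; } /* selected */
static int demo_state_2(struct demo_host *h) { (void)h; return 3; } /* cmd sent */
static int demo_state_3(struct demo_host *h) { (void)h; return 0; } /* bus free */

static int demo_next_state(struct demo_host *h)
{
        int next = h->next_state;

        for (;;) {
                switch (next) {
                case 1: next = demo_state_1(h); break;
                case 2: next = demo_state_2(h); break;
                case 3: next = demo_state_3(h); break;
                default:
                        return -1;              /* unexpected state: bad sequence */
                }
                if (next <= 0)
                        return next;            /* 0 = done, -1 = abandon */
                h->next_state = next;           /* remember where to resume */
        }
}

int main(void)
{
        struct demo_host h = { .next_state = 1 };

        printf("state machine returned %d\n", demo_next_state(&h));
        return 0;
}

In the driver itself the loop is driven by wait_tulip(), which polls the chip status registers, classifies the interrupt (reselection, selection timeout, bus reset, disconnect) and hands the resulting phase back to the state handlers.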
H A D | a100u2w.c | 50 * - Fix allocation of scsi host structs and private data 85 static struct orc_scb *__orc_alloc_scb(struct orc_host * host); 86 static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb); 139 static u8 wait_chip_ready(struct orc_host * host) wait_chip_ready() argument 144 if (inb(host->base + ORC_HCTRL) & HOSTSTOP) /* Wait HOSTSTOP set */ wait_chip_ready() 151 static u8 wait_firmware_ready(struct orc_host * host) wait_firmware_ready() argument 156 if (inb(host->base + ORC_HSTUS) & RREADY) /* Wait READY set */ wait_firmware_ready() 164 static u8 wait_scsi_reset_done(struct orc_host * host) wait_scsi_reset_done() argument 169 if (!(inb(host->base + ORC_HCTRL) & SCSIRST)) /* Wait SCSIRST done */ wait_scsi_reset_done() 177 static u8 wait_HDO_off(struct orc_host * host) wait_HDO_off() argument 182 if (!(inb(host->base + ORC_HCTRL) & HDO)) /* Wait HDO off */ wait_HDO_off() 190 static u8 wait_hdi_set(struct orc_host * host, u8 * data) wait_hdi_set() argument 195 if ((*data = inb(host->base + ORC_HSTUS)) & HDI) wait_hdi_set() 203 static unsigned short orc_read_fwrev(struct orc_host * host) orc_read_fwrev() argument 208 outb(ORC_CMD_VERSION, host->base + ORC_HDATA); orc_read_fwrev() 209 outb(HDO, host->base + ORC_HCTRL); orc_read_fwrev() 210 if (wait_HDO_off(host) == 0) /* Wait HDO off */ orc_read_fwrev() 213 if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */ orc_read_fwrev() 215 version = inb(host->base + ORC_HDATA); orc_read_fwrev() 216 outb(data, host->base + ORC_HSTUS); /* Clear HDI */ orc_read_fwrev() 218 if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */ orc_read_fwrev() 220 version |= inb(host->base + ORC_HDATA) << 8; orc_read_fwrev() 221 outb(data, host->base + ORC_HSTUS); /* Clear HDI */ orc_read_fwrev() 227 static u8 orc_nv_write(struct orc_host * host, unsigned char address, unsigned char value) orc_nv_write() argument 229 outb(ORC_CMD_SET_NVM, host->base + ORC_HDATA); /* Write command */ orc_nv_write() 230 outb(HDO, host->base + ORC_HCTRL); orc_nv_write() 231 if (wait_HDO_off(host) == 0) /* Wait HDO off */ orc_nv_write() 234 outb(address, host->base + ORC_HDATA); /* Write address */ orc_nv_write() 235 outb(HDO, host->base + ORC_HCTRL); orc_nv_write() 236 if (wait_HDO_off(host) == 0) /* Wait HDO off */ orc_nv_write() 239 outb(value, host->base + ORC_HDATA); /* Write value */ orc_nv_write() 240 outb(HDO, host->base + ORC_HCTRL); orc_nv_write() 241 if (wait_HDO_off(host) == 0) /* Wait HDO off */ orc_nv_write() 248 static u8 orc_nv_read(struct orc_host * host, u8 address, u8 *ptr) orc_nv_read() argument 252 outb(ORC_CMD_GET_NVM, host->base + ORC_HDATA); /* Write command */ orc_nv_read() 253 outb(HDO, host->base + ORC_HCTRL); orc_nv_read() 254 if (wait_HDO_off(host) == 0) /* Wait HDO off */ orc_nv_read() 257 outb(address, host->base + ORC_HDATA); /* Write address */ orc_nv_read() 258 outb(HDO, host->base + ORC_HCTRL); orc_nv_read() 259 if (wait_HDO_off(host) == 0) /* Wait HDO off */ orc_nv_read() 262 if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */ orc_nv_read() 264 *ptr = inb(host->base + ORC_HDATA); orc_nv_read() 265 outb(data, host->base + ORC_HSTUS); /* Clear HDI */ orc_nv_read() 273 * @host: host adapter the SCB belongs to 277 static void orc_exec_scb(struct orc_host * host, struct orc_scb * scb) orc_exec_scb() argument 280 outb(scb->scbidx, host->base + ORC_PQUEUE); orc_exec_scb() 286 * @host: Host whose EEPROM is being loaded 291 static int se2_rd_all(struct orc_host * host) se2_rd_all() argument 298 if (orc_nv_read(host, (u8) i, 
np) == 0) se2_rd_all() 314 * @host: Host whose EEPROM is being updated 319 static void se2_update_all(struct orc_host * host) se2_update_all() argument 334 orc_nv_write(host, (u8) i, *np); se2_update_all() 340 * @host: Host EEPROM to read 342 * Read the EEPROM for a given host. If it is invalid or fails 346 static void read_eeprom(struct orc_host * host) read_eeprom() argument 348 if (se2_rd_all(host) != 1) { read_eeprom() 349 se2_update_all(host); /* setup default pattern */ read_eeprom() 350 se2_rd_all(host); /* load again */ read_eeprom() 357 * @host: Host to set up 365 static u8 orc_load_firmware(struct orc_host * host) orc_load_firmware() argument 375 data = inb(host->base + ORC_GCFG); orc_load_firmware() 376 outb(data | EEPRG, host->base + ORC_GCFG); /* Enable EEPROM programming */ orc_load_firmware() 377 outb(0x00, host->base + ORC_EBIOSADR2); orc_load_firmware() 378 outw(0x0000, host->base + ORC_EBIOSADR0); orc_load_firmware() 379 if (inb(host->base + ORC_EBIOSDATA) != 0x55) { orc_load_firmware() 380 outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */ orc_load_firmware() 383 outw(0x0001, host->base + ORC_EBIOSADR0); orc_load_firmware() 384 if (inb(host->base + ORC_EBIOSDATA) != 0xAA) { orc_load_firmware() 385 outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */ orc_load_firmware() 389 outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Enable SRAM programming */ orc_load_firmware() 392 outw(0x0010, host->base + ORC_EBIOSADR0); orc_load_firmware() 393 *data32_ptr = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ orc_load_firmware() 394 outw(0x0011, host->base + ORC_EBIOSADR0); orc_load_firmware() 395 *(data32_ptr + 1) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ orc_load_firmware() 396 outw(0x0012, host->base + ORC_EBIOSADR0); orc_load_firmware() 397 *(data32_ptr + 2) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ orc_load_firmware() 398 outw(*(data32_ptr + 2), host->base + ORC_EBIOSADR2); orc_load_firmware() 399 outl(le32_to_cpu(data32), host->base + ORC_FWBASEADR); /* Write FW address */ orc_load_firmware() 408 outw(bios_addr, host->base + ORC_EBIOSADR0); orc_load_firmware() 409 *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ orc_load_firmware() 411 outl(le32_to_cpu(data32), host->base + ORC_RISCRAM); /* Write every 4 bytes */ orc_load_firmware() 418 outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Reset program count 0 */ orc_load_firmware() 423 outw(bios_addr, host->base + ORC_EBIOSADR0); orc_load_firmware() 424 *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ orc_load_firmware() 426 if (inl(host->base + ORC_RISCRAM) != le32_to_cpu(data32)) { orc_load_firmware() 427 outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */ orc_load_firmware() 428 outb(data, host->base + ORC_GCFG); /*Disable EEPROM programming */ orc_load_firmware() 436 outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */ orc_load_firmware() 437 outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */ orc_load_firmware() 442 static void setup_SCBs(struct orc_host * host) setup_SCBs() argument 450 outb(ORC_MAXQUEUE, host->base + ORC_SCBSIZE); /* Total number of SCBs */ setup_SCBs() 452 outl(host->scb_phys, host->base + ORC_SCBBASE0); setup_SCBs() 454 outl(host->scb_phys, host->base + ORC_SCBBASE1); setup_SCBs() 457 scb = host->scb_virt; setup_SCBs() 458 escb = host->escb_virt; setup_SCBs() 461 escb_phys = (host->escb_phys + (sizeof(struct orc_extended_scb) * i)); setup_SCBs() 
473 * @host: host map to configure 479 static void init_alloc_map(struct orc_host * host) init_alloc_map() argument 485 host->allocation_map[i][j] = 0xffffffff; init_alloc_map() 491 * init_orchid - initialise the host adapter 492 * @host:host adapter to initialise 499 static int init_orchid(struct orc_host * host) init_orchid() argument 505 init_alloc_map(host); init_orchid() 506 outb(0xFF, host->base + ORC_GIMSK); /* Disable all interrupts */ init_orchid() 508 if (inb(host->base + ORC_HSTUS) & RREADY) { /* Orchid is ready */ init_orchid() 509 revision = orc_read_fwrev(host); init_orchid() 511 outb(DEVRST, host->base + ORC_HCTRL); /* Reset Host Adapter */ init_orchid() 512 if (wait_chip_ready(host) == 0) init_orchid() 514 orc_load_firmware(host); /* Download FW */ init_orchid() 515 setup_SCBs(host); /* Setup SCB base and SCB Size registers */ init_orchid() 516 outb(0x00, host->base + ORC_HCTRL); /* clear HOSTSTOP */ init_orchid() 517 if (wait_firmware_ready(host) == 0) init_orchid() 521 setup_SCBs(host); /* Setup SCB base and SCB Size registers */ init_orchid() 524 outb(DEVRST, host->base + ORC_HCTRL); /* Reset Host Adapter */ init_orchid() 525 if (wait_chip_ready(host) == 0) init_orchid() 527 orc_load_firmware(host); /* Download FW */ init_orchid() 528 setup_SCBs(host); /* Setup SCB base and SCB Size registers */ init_orchid() 529 outb(HDO, host->base + ORC_HCTRL); /* Do Hardware Reset & */ init_orchid() 532 if (wait_firmware_ready(host) == 0) /* Wait for firmware ready */ init_orchid() 538 read_eeprom(host); init_orchid() 543 host->scsi_id = nvramp->scsi_id; init_orchid() 544 host->BIOScfg = nvramp->BIOSConfig1; init_orchid() 545 host->max_targets = MAX_TARGETS; init_orchid() 548 host->target_flag[i] = *ptr; init_orchid() 549 host->max_tags[i] = ORC_MAXTAGS; init_orchid() 553 host->flags |= HCF_SCSI_RESET; init_orchid() 554 outb(0xFB, host->base + ORC_GIMSK); /* enable RP FIFO interrupt */ init_orchid() 560 * @host: host being reset 565 static int orc_reset_scsi_bus(struct orc_host * host) orc_reset_scsi_bus() argument 569 spin_lock_irqsave(&host->allocation_lock, flags); orc_reset_scsi_bus() 571 init_alloc_map(host); orc_reset_scsi_bus() 573 outb(SCSIRST, host->base + ORC_HCTRL); orc_reset_scsi_bus() 576 if (wait_scsi_reset_done(host) == 0) { orc_reset_scsi_bus() 577 spin_unlock_irqrestore(&host->allocation_lock, flags); orc_reset_scsi_bus() 580 spin_unlock_irqrestore(&host->allocation_lock, flags); orc_reset_scsi_bus() 587 * @host: host to reset 595 static int orc_device_reset(struct orc_host * host, struct scsi_cmnd *cmd, unsigned int target) orc_device_reset() argument 603 spin_lock_irqsave(&(host->allocation_lock), flags); orc_device_reset() 608 host_scb = host->scb_virt; orc_device_reset() 612 init_alloc_map(host); orc_device_reset() 624 spin_unlock_irqrestore(&(host->allocation_lock), flags); orc_device_reset() 629 if ((scb = __orc_alloc_scb(host)) == NULL) { orc_device_reset() 631 spin_unlock_irqrestore(&(host->allocation_lock), flags); orc_device_reset() 650 orc_exec_scb(host, scb); /* Start execute SCB */ orc_device_reset() 651 spin_unlock_irqrestore(&host->allocation_lock, flags); orc_device_reset() 657 * @host: host to allocate from 665 static struct orc_scb *__orc_alloc_scb(struct orc_host * host) __orc_alloc_scb() argument 672 channel = host->index; __orc_alloc_scb() 675 if ((host->allocation_map[channel][i] >> index) & 0x01) { __orc_alloc_scb() 676 host->allocation_map[channel][i] &= ~(1 << index); __orc_alloc_scb() 681 return host->scb_virt + idx; __orc_alloc_scb() 690 * 
@host: host to allocate from 696 static struct orc_scb *orc_alloc_scb(struct orc_host * host) orc_alloc_scb() argument 701 spin_lock_irqsave(&host->allocation_lock, flags); orc_alloc_scb() 702 scb = __orc_alloc_scb(host); orc_alloc_scb() 703 spin_unlock_irqrestore(&host->allocation_lock, flags); orc_alloc_scb() 709 * @host: host owning the SCB 713 * calling the SCB must be out of use on both the host and the HA. 716 static void orc_release_scb(struct orc_host *host, struct orc_scb *scb) orc_release_scb() argument 721 spin_lock_irqsave(&(host->allocation_lock), flags); orc_release_scb() 722 channel = host->index; /* Channel */ orc_release_scb() 726 host->allocation_map[channel][i] |= (1 << index); orc_release_scb() 727 spin_unlock_irqrestore(&(host->allocation_lock), flags); orc_release_scb() 738 static int orchid_abort_scb(struct orc_host * host, struct orc_scb * scb) orchid_abort_scb() argument 742 outb(ORC_CMD_ABORT_SCB, host->base + ORC_HDATA); /* Write command */ orchid_abort_scb() 743 outb(HDO, host->base + ORC_HCTRL); orchid_abort_scb() 744 if (wait_HDO_off(host) == 0) /* Wait HDO off */ orchid_abort_scb() 747 outb(scb->scbidx, host->base + ORC_HDATA); /* Write address */ orchid_abort_scb() 748 outb(HDO, host->base + ORC_HCTRL); orchid_abort_scb() 749 if (wait_HDO_off(host) == 0) /* Wait HDO off */ orchid_abort_scb() 752 if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */ orchid_abort_scb() 754 status = inb(host->base + ORC_HDATA); orchid_abort_scb() 755 outb(data, host->base + ORC_HSTUS); /* Clear HDI */ orchid_abort_scb() 762 static int inia100_abort_cmd(struct orc_host * host, struct scsi_cmnd *cmd) inia100_abort_cmd() argument 769 spin_lock_irqsave(&(host->allocation_lock), flags); inia100_abort_cmd() 771 scb = host->scb_virt; inia100_abort_cmd() 784 if (orchid_abort_scb(host, scb)) { inia100_abort_cmd() 786 spin_unlock_irqrestore(&host->allocation_lock, flags); inia100_abort_cmd() 794 spin_unlock_irqrestore(&host->allocation_lock, flags); inia100_abort_cmd() 800 * @host: Host causing the interrupt 803 * by the host lock. 
While the controller reports that there are 805 * index into a host address pointer to the scb and call the scb 811 static irqreturn_t orc_interrupt(struct orc_host * host) orc_interrupt() argument 817 if (inb(host->base + ORC_RQUEUECNT) == 0) orc_interrupt() 822 scb_index = inb(host->base + ORC_RQUEUE); orc_interrupt() 824 /* Translate it back to a host pointer */ orc_interrupt() 825 scb = (struct orc_scb *) ((unsigned long) host->scb_virt + (unsigned long) (sizeof(struct orc_scb) * scb_index)); orc_interrupt() 828 inia100_scb_handler(host, scb); orc_interrupt() 829 } while (inb(host->base + ORC_RQUEUECNT)); orc_interrupt() 835 * @host: host owing the control block 839 * Build a host adapter control block from the SCSI mid layer command 842 static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struct scsi_cmnd * cmd) inia100_build_scb() argument 905 * inia100_queue - queue command with host 910 * block, build the host specific scb structures and if there is room 917 struct orc_host *host; /* Point to Host adapter control block */ inia100_queue_lck() local 919 host = (struct orc_host *) cmd->device->host->hostdata; inia100_queue_lck() 922 if ((scb = orc_alloc_scb(host)) == NULL) inia100_queue_lck() 925 if (inia100_build_scb(host, scb, cmd)) { inia100_queue_lck() 926 orc_release_scb(host, scb); inia100_queue_lck() 929 orc_exec_scb(host, scb); /* Start execute SCB */ inia100_queue_lck() 939 Input : host - Pointer to host adapter structure 945 struct orc_host *host; inia100_abort() local 947 host = (struct orc_host *) cmd->device->host->hostdata; inia100_abort() 948 return inia100_abort_cmd(host, cmd); inia100_abort() 955 Input : host - Pointer to host adapter structure 961 struct orc_host *host; inia100_bus_reset() local 962 host = (struct orc_host *) cmd->device->host->hostdata; inia100_bus_reset() 963 return orc_reset_scsi_bus(host); inia100_bus_reset() 969 Input : host - Pointer to host adapter structure 975 struct orc_host *host; inia100_device_reset() local 976 host = (struct orc_host *) cmd->device->host->hostdata; inia100_device_reset() 977 return orc_device_reset(host, cmd, scmd_id(cmd)); inia100_device_reset() 983 * @host: Host causing the interrupt 987 * from host to SCSI midlayer error coding, save any sense data and 991 static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb) inia100_scb_handler() argument 999 orc_release_scb(host, scb); /* Release SCB for current channel */ inia100_scb_handler() 1017 phase sequence was requested by the target. 
The host adapter inia100_scb_handler() 1018 will generate a SCSI Reset Condition, notifying the host with inia100_scb_handler() 1046 orc_release_scb(host, scb); /* Release SCB for current channel */ inia100_scb_handler() 1060 struct orc_host *host = (struct orc_host *)shost->hostdata; inia100_intr() local 1065 res = orc_interrupt(host); inia100_intr() 1089 struct orc_host *host; inia100_probe_one() local 1120 host = (struct orc_host *)shost->hostdata; inia100_probe_one() 1121 host->pdev = pdev; inia100_probe_one() 1122 host->base = port; inia100_probe_one() 1123 host->BIOScfg = bios; inia100_probe_one() 1124 spin_lock_init(&host->allocation_lock); inia100_probe_one() 1128 host->scb_virt = pci_zalloc_consistent(pdev, sz, &host->scb_phys); inia100_probe_one() 1129 if (!host->scb_virt) { inia100_probe_one() 1136 host->escb_virt = pci_zalloc_consistent(pdev, sz, &host->escb_phys); inia100_probe_one() 1137 if (!host->escb_virt) { inia100_probe_one() 1142 biosaddr = host->BIOScfg; inia100_probe_one() 1145 if (init_orchid(host)) { /* Initialize orchid chip */ inia100_probe_one() 1150 shost->io_port = host->base; inia100_probe_one() 1154 shost->max_id = host->max_targets; inia100_probe_one() 1157 shost->this_id = host->scsi_id; /* Assign HCS index */ inia100_probe_one() 1182 host->escb_virt, host->escb_phys); inia100_probe_one() 1185 host->scb_virt, host->scb_phys); inia100_probe_one() 1199 struct orc_host *host = (struct orc_host *)shost->hostdata; inia100_remove_one() local 1205 host->escb_virt, host->escb_phys); inia100_remove_one() 1207 host->scb_virt, host->scb_phys); inia100_remove_one()
|
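The a100u2w SCB pool above is managed with a plain bitmap: __orc_alloc_scb() scans allocation_map for a set bit (set means free), clears it and turns the bit position into an SCB index, and orc_release_scb() simply sets the bit again. A compilable sketch of that allocator, with demo_* names and sizes that are illustrative only:

/* Bitmap block allocator in the style of __orc_alloc_scb()/orc_release_scb(). */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_BLOCKS 64
#define DEMO_WORDS      (DEMO_MAX_BLOCKS / 32)

static uint32_t demo_map[DEMO_WORDS];

static void demo_init_map(void)
{
        int i;

        for (i = 0; i < DEMO_WORDS; i++)
                demo_map[i] = 0xffffffffu;      /* every block starts out free */
}

static int demo_alloc_block(void)
{
        int i, bit;

        for (i = 0; i < DEMO_WORDS; i++) {
                for (bit = 0; bit < 32; bit++) {
                        if ((demo_map[i] >> bit) & 1u) {
                                demo_map[i] &= ~(1u << bit);    /* mark busy */
                                return i * 32 + bit;            /* block index */
                        }
                }
        }
        return -1;                              /* pool exhausted */
}

static void demo_release_block(int idx)
{
        demo_map[idx / 32] |= 1u << (idx % 32); /* mark free again */
}

int main(void)
{
        int a, b;

        demo_init_map();
        a = demo_alloc_block();
        b = demo_alloc_block();
        demo_release_block(a);
        printf("got %d and %d, next after release is %d\n", a, b, demo_alloc_block());
        return 0;
}

The driver keeps one such map per channel (host->index) and wraps both operations in allocation_lock, since SCBs are released from the interrupt path by inia100_scb_handler().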
H A D | a4000t.c | 39 struct Scsi_Host *host; amiga_a4000t_scsi_probe() local 52 dev_err(&pdev->dev, "Failed to allocate host data\n"); amiga_a4000t_scsi_probe() 66 host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata, amiga_a4000t_scsi_probe() 68 if (!host) { amiga_a4000t_scsi_probe() 70 "No host detected; board configuration problem?\n"); amiga_a4000t_scsi_probe() 74 host->this_id = 7; amiga_a4000t_scsi_probe() 75 host->base = scsi_addr; amiga_a4000t_scsi_probe() 76 host->irq = IRQ_AMIGA_PORTS; amiga_a4000t_scsi_probe() 78 if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi", amiga_a4000t_scsi_probe() 79 host)) { amiga_a4000t_scsi_probe() 84 platform_set_drvdata(pdev, host); amiga_a4000t_scsi_probe() 85 scsi_scan_host(host); amiga_a4000t_scsi_probe() 89 scsi_host_put(host); amiga_a4000t_scsi_probe() 99 struct Scsi_Host *host = platform_get_drvdata(pdev); amiga_a4000t_scsi_remove() local 100 struct NCR_700_Host_Parameters *hostdata = shost_priv(host); amiga_a4000t_scsi_remove() 103 scsi_remove_host(host); amiga_a4000t_scsi_remove() 104 NCR_700_release(host); amiga_a4000t_scsi_remove() 106 free_irq(host->irq, host); amiga_a4000t_scsi_remove()
|
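a4000t.c above, like the bvme6000_scsi.c, mvme16x_scsi.c, zorro7xx.c and sni_53c710.c entries that follow, is thin glue around the 53c700 core: allocate the per-board parameter block, call NCR_700_detect(), fill in this_id, base and irq, claim the interrupt, then publish the host, unwinding in reverse order at whichever step fails. The goto-based unwind is the recurring shape; a standalone sketch of it with stand-in demo_* helpers (not the kernel APIs):

/* Probe-style setup with reverse-order error unwind. */
#include <stdio.h>
#include <stdlib.h>

struct demo_host { int irq; };

static struct demo_host *demo_detect(void *priv)
{
        (void)priv;                             /* a real detect would use this */
        return malloc(sizeof(struct demo_host));
}

static int demo_request_irq(int irq)  { (void)irq; return 0; }  /* 0 = success */
static void demo_put_host(struct demo_host *h) { free(h); }

static int demo_probe(void)
{
        void *hostdata = calloc(1, 64);         /* per-board private data */
        struct demo_host *host;

        if (!hostdata)
                return -1;

        host = demo_detect(hostdata);
        if (!host)
                goto out_free;                  /* only the allocation to undo */

        host->irq = 13;
        if (demo_request_irq(host->irq))
                goto out_put;                   /* also undo the detect step */

        printf("host registered\n");            /* success: host is live */
        return 0;

out_put:
        demo_put_host(host);
out_free:
        free(hostdata);
        return -1;
}

int main(void)
{
        return demo_probe() ? 1 : 0;
}

The later entries differ mainly in how they obtain the register base and interrupt line for the board.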
H A D | bvme6000_scsi.c | 40 struct Scsi_Host *host; bvme6000_probe() local 49 "Failed to allocate host data\n"); bvme6000_probe() 62 host = NCR_700_detect(&bvme6000_scsi_driver_template, hostdata, bvme6000_probe() 64 if (!host) { bvme6000_probe() 65 printk(KERN_ERR "bvme6000-scsi: No host detected; " bvme6000_probe() 69 host->base = BVME_NCR53C710_BASE; bvme6000_probe() 70 host->this_id = 7; bvme6000_probe() 71 host->irq = BVME_IRQ_SCSI; bvme6000_probe() 73 host)) { bvme6000_probe() 78 platform_set_drvdata(dev, host); bvme6000_probe() 79 scsi_scan_host(host); bvme6000_probe() 84 scsi_host_put(host); bvme6000_probe() 94 struct Scsi_Host *host = platform_get_drvdata(dev); bvme6000_device_remove() local 95 struct NCR_700_Host_Parameters *hostdata = shost_priv(host); bvme6000_device_remove() 97 scsi_remove_host(host); bvme6000_device_remove() 98 NCR_700_release(host); bvme6000_device_remove() 100 free_irq(host->irq, host); bvme6000_device_remove()
|
H A D | mvme16x_scsi.c | 39 struct Scsi_Host * host = NULL; mvme16x_probe() local 54 "Failed to allocate host data\n"); mvme16x_probe() 67 host = NCR_700_detect(&mvme16x_scsi_driver_template, hostdata, mvme16x_probe() 69 if (!host) { mvme16x_probe() 70 printk(KERN_ERR "mvme16x-scsi: No host detected; " mvme16x_probe() 74 host->this_id = 7; mvme16x_probe() 75 host->base = 0xfff47000UL; mvme16x_probe() 76 host->irq = MVME16x_IRQ_SCSI; mvme16x_probe() 77 if (request_irq(host->irq, NCR_700_intr, 0, "mvme16x-scsi", host)) { mvme16x_probe() 92 platform_set_drvdata(dev, host); mvme16x_probe() 93 scsi_scan_host(host); mvme16x_probe() 98 scsi_host_put(host); mvme16x_probe() 107 struct Scsi_Host *host = platform_get_drvdata(dev); mvme16x_device_remove() local 108 struct NCR_700_Host_Parameters *hostdata = shost_priv(host); mvme16x_device_remove() 118 scsi_remove_host(host); mvme16x_device_remove() 119 NCR_700_release(host); mvme16x_device_remove() 121 free_irq(host->irq, host); mvme16x_device_remove()
|
H A D | zorro7xx.c | 77 struct Scsi_Host *host; zorro7xx_init_one() local 99 printk(KERN_ERR "zorro7xx: Failed to allocate host data\n"); zorro7xx_init_one() 118 host = NCR_700_detect(&zorro7xx_scsi_driver_template, hostdata, zorro7xx_init_one() 120 if (!host) { zorro7xx_init_one() 121 printk(KERN_ERR "zorro7xx: No host detected; " zorro7xx_init_one() 126 host->this_id = 7; zorro7xx_init_one() 127 host->base = ioaddr; zorro7xx_init_one() 128 host->irq = IRQ_AMIGA_PORTS; zorro7xx_init_one() 130 if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "zorro7xx-scsi", zorro7xx_init_one() 131 host)) { zorro7xx_init_one() 136 zorro_set_drvdata(z, host); zorro7xx_init_one() 137 scsi_scan_host(host); zorro7xx_init_one() 142 scsi_host_put(host); zorro7xx_init_one() 155 struct Scsi_Host *host = zorro_get_drvdata(z); zorro7xx_remove_one() local 156 struct NCR_700_Host_Parameters *hostdata = shost_priv(host); zorro7xx_remove_one() 158 scsi_remove_host(host); zorro7xx_remove_one() 160 NCR_700_release(host); zorro7xx_remove_one() 162 free_irq(host->irq, host); zorro7xx_remove_one()
|
H A D | sni_53c710.c | 72 struct Scsi_Host *host; snirm710_probe() local 82 dev_printk(KERN_ERR, dev, "Failed to allocate host data\n"); snirm710_probe() 96 host = NCR_700_detect(&snirm710_template, hostdata, &dev->dev); snirm710_probe() 97 if (!host) snirm710_probe() 99 host->this_id = 7; snirm710_probe() 100 host->base = base; snirm710_probe() 101 host->irq = platform_get_irq(dev, 0); snirm710_probe() 102 if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) { snirm710_probe() 107 dev_set_drvdata(&dev->dev, host); snirm710_probe() 108 scsi_scan_host(host); snirm710_probe() 113 scsi_host_put(host); snirm710_probe() 122 struct Scsi_Host *host = dev_get_drvdata(&dev->dev); snirm710_driver_remove() local 124 (struct NCR_700_Host_Parameters *)host->hostdata[0]; snirm710_driver_remove() 126 scsi_remove_host(host); snirm710_driver_remove() 127 NCR_700_release(host); snirm710_driver_remove() 128 free_irq(host->irq, host); snirm710_driver_remove()
|
H A D | sgiwd93.c | 41 #define host_to_hostdata(host) ((struct ip22_hostdata *)((host)->hostdata)) 55 struct Scsi_Host * host = dev_id; sgiwd93_intr() local 58 spin_lock_irqsave(host->host_lock, flags); sgiwd93_intr() 59 wd33c93_intr(host); sgiwd93_intr() 60 spin_unlock_irqrestore(host->host_lock, flags); sgiwd93_intr() 105 struct ip22_hostdata *hdata = host_to_hostdata(cmd->device->host); dma_setup() 107 (struct hpc3_scsiregs *) cmd->device->host->base; dma_setup() 148 hregs = (struct hpc3_scsiregs *) SCpnt->device->host->base; dma_stop() 202 spin_lock_irq(cmd->device->host->host_lock); sgiwd93_bus_reset() 204 spin_unlock_irq(cmd->device->host->host_lock); sgiwd93_bus_reset() 235 struct Scsi_Host *host; sgiwd93_probe() local 241 host = scsi_host_alloc(&sgiwd93_template, sizeof(struct ip22_hostdata)); sgiwd93_probe() 242 if (!host) { sgiwd93_probe() 247 host->base = (unsigned long) hregs; sgiwd93_probe() 248 host->irq = irq; sgiwd93_probe() 250 hdata = host_to_hostdata(host); sgiwd93_probe() 256 "host %d buffer.\n", unit); sgiwd93_probe() 270 wd33c93_init(host, regs, dma_setup, dma_stop, WD33C93_FS_MHZ(20)); sgiwd93_probe() 272 err = request_irq(irq, sgiwd93_intr, 0, "SGI WD93", host); sgiwd93_probe() 275 "for host %d.\n", irq, unit); sgiwd93_probe() 279 platform_set_drvdata(pdev, host); sgiwd93_probe() 281 err = scsi_add_host(host, NULL); sgiwd93_probe() 285 scsi_scan_host(host); sgiwd93_probe() 290 free_irq(irq, host); sgiwd93_probe() 294 scsi_host_put(host); sgiwd93_probe() 302 struct Scsi_Host *host = platform_get_drvdata(pdev); sgiwd93_remove() local 303 struct ip22_hostdata *hdata = (struct ip22_hostdata *) host->hostdata; sgiwd93_remove() 306 scsi_remove_host(host); sgiwd93_remove() 307 free_irq(pd->irq, host); sgiwd93_remove() 309 scsi_host_put(host); sgiwd93_remove()
|
H A D | 53c700.c | 35 * The 700 chip has no host bus interface logic of its own. However, 173 STATIC void NCR_700_chip_setup(struct Scsi_Host *host); 174 STATIC void NCR_700_chip_reset(struct Scsi_Host *host); 278 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0]; NCR_700_get_SXFER() 292 struct Scsi_Host *host; NCR_700_detect() local 315 /* Fill in the missing routines from the host template */ NCR_700_detect() 335 host = scsi_host_alloc(tpnt, 4); NCR_700_detect() 336 if (!host) NCR_700_detect() 370 host->max_id = 8; NCR_700_detect() 371 host->max_lun = NCR_700_MAX_LUNS; NCR_700_detect() 373 host->transportt = NCR_700_transport_template; NCR_700_detect() 374 host->unique_id = (unsigned long)hostdata->base; NCR_700_detect() 376 host->hostdata[0] = (unsigned long)hostdata; NCR_700_detect() 378 NCR_700_writeb(0xff, host, CTEST9_REG); NCR_700_detect() 380 hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f; NCR_700_detect() 382 hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f; NCR_700_detect() 383 hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0); NCR_700_detect() 388 printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no, NCR_700_detect() 394 NCR_700_chip_reset(host); NCR_700_detect() 396 if (scsi_add_host(host, dev)) { NCR_700_detect() 398 scsi_host_put(host); NCR_700_detect() 402 spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD : NCR_700_detect() 405 return host; NCR_700_detect() 409 NCR_700_release(struct Scsi_Host *host) NCR_700_release() argument 412 (struct NCR_700_Host_Parameters *)host->hostdata[0]; NCR_700_release() 428 * Function : static int data_residual (Scsi_Host *host) 436 * Inputs : host - SCSI host */ 438 NCR_700_data_residual (struct Scsi_Host *host) { NCR_700_data_residual() argument 440 (struct NCR_700_Host_Parameters *)host->hostdata[0]; NCR_700_data_residual() 445 count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) - NCR_700_data_residual() 446 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f; NCR_700_data_residual() 448 count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) - NCR_700_data_residual() 449 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f; NCR_700_data_residual() 453 synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f; NCR_700_data_residual() 456 ddir = NCR_700_readb(host, CTEST0_REG) & 0x01; NCR_700_data_residual() 461 count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4; NCR_700_data_residual() 463 if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL) NCR_700_data_residual() 467 __u8 sstat = NCR_700_readb(host, SSTAT1_REG); NCR_700_data_residual() 632 NCR_700_internal_bus_reset(struct Scsi_Host *host) NCR_700_internal_bus_reset() argument 635 NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG); NCR_700_internal_bus_reset() 637 NCR_700_writeb(0, host, SCNTL1_REG); NCR_700_internal_bus_reset() 642 NCR_700_chip_setup(struct Scsi_Host *host) NCR_700_chip_setup() argument 645 (struct NCR_700_Host_Parameters *)host->hostdata[0]; NCR_700_chip_setup() 672 NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG); NCR_700_chip_setup() 674 host, DMODE_710_REG); NCR_700_chip_setup() 677 host, CTEST7_REG); NCR_700_chip_setup() 678 NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG); NCR_700_chip_setup() 680 | AUTO_ATN, host, SCNTL0_REG); NCR_700_chip_setup() 683 host, DMODE_700_REG); NCR_700_chip_setup() 685 DIFF : 0, host, CTEST7_REG); NCR_700_chip_setup() 689 | GENERATE_RECEIVE_PARITY, host, NCR_700_chip_setup() 693 | PARITY | AUTO_ATN, host, SCNTL0_REG); NCR_700_chip_setup() 697 NCR_700_writeb(1 << host->this_id, host, SCID_REG); 
NCR_700_chip_setup() 698 NCR_700_writeb(0, host, SBCL_REG); NCR_700_chip_setup() 699 NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG); NCR_700_chip_setup() 702 | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG); NCR_700_chip_setup() 704 NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG); NCR_700_chip_setup() 705 NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG); NCR_700_chip_setup() 711 NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG); NCR_700_chip_setup() 712 NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG); NCR_700_chip_setup() 717 NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG); NCR_700_chip_setup() 718 NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG); NCR_700_chip_setup() 725 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG); NCR_700_chip_setup() 726 NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG); NCR_700_chip_setup() 731 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG); NCR_700_chip_setup() 732 NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG); NCR_700_chip_setup() 736 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG); NCR_700_chip_setup() 737 NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG); NCR_700_chip_setup() 752 NCR_700_chip_reset(struct Scsi_Host *host) NCR_700_chip_reset() argument 755 (struct NCR_700_Host_Parameters *)host->hostdata[0]; NCR_700_chip_reset() 757 NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG); NCR_700_chip_reset() 760 NCR_700_writeb(0, host, ISTAT_REG); NCR_700_chip_reset() 762 NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG); NCR_700_chip_reset() 765 NCR_700_writeb(0, host, DCNTL_REG); NCR_700_chip_reset() 770 NCR_700_chip_setup(host); NCR_700_chip_reset() 780 process_extended_message(struct Scsi_Host *host, process_extended_message() argument 816 host, SXFER_REG); process_extended_message() 820 shost_printk(KERN_WARNING, host, process_extended_message() 834 host->host_no, pun, lun); process_extended_message() 845 host->host_no, pun, lun, process_extended_message() 858 NCR_700_writel(temp, host, TEMP_REG); process_extended_message() 863 process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata, process_message() argument 876 printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun, process_message() 885 resume_offset = process_extended_message(host, hostdata, SCp, process_message() 906 scsi_change_queue_depth(SCp->device, host->cmd_per_lun); process_message() 908 shost_printk(KERN_WARNING, host, process_message() 917 printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no, process_message() 919 NCR_700_internal_bus_reset(host); process_message() 922 printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no, process_message() 929 host->host_no, pun, lun, process_message() 945 NCR_700_writel(temp, host, TEMP_REG); process_message() 953 struct Scsi_Host *host, process_script_interrupt() 1065 sbcl_to_string(NCR_700_readb(host, SBCL_REG))); process_script_interrupt() 1070 NCR_700_internal_bus_reset(host); process_script_interrupt() 1075 host->host_no, pun, lun, NCR_700_fatal_messages[i]); process_script_interrupt() 1080 NCR_700_internal_bus_reset(host); process_script_interrupt() 1086 host->host_no, pun, lun, process_script_interrupt() 1101 host->host_no, reselection_id, lun)); process_script_interrupt() 1103 SDp = __scsi_device_lookup(host, 0, reselection_id, lun); process_script_interrupt() 1106 host->host_no, reselection_id, lun); process_script_interrupt() 1113 host->host_no, reselection_id, lun, 
hostdata->msgin[2]); process_script_interrupt() 1133 host->host_no, reselection_id, lun, process_script_interrupt() 1138 printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n", process_script_interrupt() 1139 host->host_no); process_script_interrupt() 1158 host, SXFER_REG); process_script_interrupt() 1180 __u8 reselection_id = NCR_700_readb(host, SFBR_REG); process_script_interrupt() 1184 reselection_id &= ~(1<<host->this_id); process_script_interrupt() 1189 host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count); process_script_interrupt() 1214 printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no); process_script_interrupt() 1218 host->host_no); process_script_interrupt() 1242 resume_offset = process_message(host, hostdata, SCp, process_script_interrupt() 1247 host->host_no, pun, lun, NCR_700_condition[i], process_script_interrupt() 1257 NCR_700_internal_bus_reset(host); 1260 host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript); 1264 host->host_no, pun, lun, dsps, dsp - hostdata->pScript); 1265 NCR_700_internal_bus_reset(host); 1278 process_selection(struct Scsi_Host *host, __u32 dsp) process_selection() argument 1284 (struct NCR_700_Host_Parameters *)host->hostdata[0]; process_selection() 1289 id = NCR_700_readb(host, hostdata->chip710 ? process_selection() 1293 id &= ~(1<<host->this_id); process_selection() 1298 sbcl = NCR_700_readb(host, SBCL_REG); process_selection() 1306 host->host_no, id)); process_selection() 1332 process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata); process_selection() 1359 NCR_700_clear_fifo(struct Scsi_Host *host) { NCR_700_clear_fifo() argument 1361 = (struct NCR_700_Host_Parameters *)host->hostdata[0]; NCR_700_clear_fifo() 1363 NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG); NCR_700_clear_fifo() 1365 NCR_700_writeb(CLR_FIFO, host, DFIFO_REG); NCR_700_clear_fifo() 1370 NCR_700_flush_fifo(struct Scsi_Host *host) { NCR_700_flush_fifo() argument 1372 = (struct NCR_700_Host_Parameters *)host->hostdata[0]; NCR_700_flush_fifo() 1374 NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG); NCR_700_flush_fifo() 1376 NCR_700_writeb(0, host, CTEST8_REG); NCR_700_flush_fifo() 1378 NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG); NCR_700_flush_fifo() 1380 NCR_700_writeb(0, host, DFIFO_REG); NCR_700_flush_fifo() 1393 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; NCR_700_start_command() 1403 DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n", NCR_700_start_command() 1404 SCp->device->host->host_no, slot->cmnd, slot)); NCR_700_start_command() 1456 NCR_700_clear_fifo(SCp->device->host); NCR_700_start_command() 1469 SCp->device->host, SXFER_REG); NCR_700_start_command() 1470 NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG); NCR_700_start_command() 1471 NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG); NCR_700_start_command() 1479 struct Scsi_Host *host = (struct Scsi_Host *)dev_id; NCR_700_intr() local 1481 (struct NCR_700_Host_Parameters *)host->hostdata[0]; NCR_700_intr() 1488 /* Use the host lock to serialise access to the 53c700 NCR_700_intr() 1491 * need to ensure that for this driver, the host lock and the NCR_700_intr() 1493 spin_lock_irqsave(host->host_lock, flags); NCR_700_intr() 1494 if((istat = NCR_700_readb(host, ISTAT_REG)) NCR_700_intr() 1509 sstat0 = NCR_700_readb(host, SSTAT0_REG); NCR_700_intr() 1515 dstat = NCR_700_readb(host, DSTAT_REG); NCR_700_intr() 1518 dsps = 
NCR_700_readl(host, DSPS_REG); NCR_700_intr() 1519 dsp = NCR_700_readl(host, DSP_REG); NCR_700_intr() 1522 host->host_no, istat, sstat0, dstat, NCR_700_intr() 1538 host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript); NCR_700_intr() 1540 scsi_report_bus_reset(host, 0); NCR_700_intr() 1543 __shost_for_each_device(SDp, host) __shost_for_each_device() 1571 NCR_700_chip_setup(host); 1581 host->host_no, pun, lun)); 1591 __u32 temp = NCR_700_readl(host, TEMP_REG); 1592 int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host)); 1593 printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG))); 1598 int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff; 1600 int residual = NCR_700_data_residual(host); 1603 __u32 naddr = NCR_700_readl(host, DNAD_REG); 1606 host->host_no, pun, lun, 1611 host->host_no, pun, lun, 1631 printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual); 1646 NCR_700_flush_fifo(host); 1648 __u8 sbcl = NCR_700_readb(host, SBCL_REG); 1650 host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl)); 1651 NCR_700_internal_bus_reset(host); 1656 host->host_no, pun, lun); 1660 host->host_no, pun, lun); 1664 host->host_no, pun, lun)); 1665 resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata); 1669 host->host_no, pun, lun, 1674 host->host_no, pun, lun, dstat); 1700 resume_offset = process_selection(host, dsp); 1708 printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n", 1709 host->host_no, resume_offset, resume_offset - hostdata->pScript); 1714 NCR_700_clear_fifo(host); 1715 NCR_700_writel(resume_offset, host, DSP_REG); 1735 host->host_no, &hostdata->slots[j], 1744 spin_unlock_irqrestore(host->host_lock, flags); 1752 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; NCR_700_queuecommand_lck() 1760 printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no); NCR_700_queuecommand_lck() 1796 printk("53c700: scsi%d, command ", SCp->device->host->host_no); NCR_700_queuecommand_lck() 1928 NCR_700_internal_bus_reset(SCp->device->host); DEF_SCSI_QCMD() 1940 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; NCR_700_bus_reset() 1949 spin_lock_irq(SCp->device->host->host_lock); NCR_700_bus_reset() 1951 spin_unlock_irq(SCp->device->host->host_lock); NCR_700_bus_reset() 1953 spin_lock_irq(SCp->device->host->host_lock); NCR_700_bus_reset() 1957 NCR_700_internal_bus_reset(SCp->device->host); NCR_700_bus_reset() 1959 spin_unlock_irq(SCp->device->host->host_lock); NCR_700_bus_reset() 1961 spin_lock_irq(SCp->device->host->host_lock); NCR_700_bus_reset() 1968 spin_unlock_irq(SCp->device->host->host_lock); NCR_700_bus_reset() 1978 spin_lock_irq(SCp->device->host->host_lock); NCR_700_host_reset() 1980 NCR_700_internal_bus_reset(SCp->device->host); NCR_700_host_reset() 1981 NCR_700_chip_reset(SCp->device->host); NCR_700_host_reset() 1983 spin_unlock_irq(SCp->device->host->host_lock); NCR_700_host_reset() 2049 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0]; NCR_700_slave_configure() 952 process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp, struct 
Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata) process_script_interrupt() argument
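The residual calculation around lines 445-449 of the 53c700.c listing is a modular subtraction: the DFIFO and DBC counters both count transferred bytes modulo the DMA FIFO depth, so the bytes still sitting in the FIFO are their difference taken in the same modulus (mask 0x7f on the 710, 0x3f on the 700, which suggests 128- and 64-byte FIFOs). A standalone sketch of just that arithmetic:

static inline unsigned int fifo_residual(unsigned int dfifo, unsigned int dbc,
					 unsigned int mask)
{
	/* both counters wrap at the FIFO depth, so subtract in that modulus */
	return ((dfifo & mask) - (dbc & mask)) & mask;
}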
|
H A D | aha1740.c | 86 #define HOSTDATA(host) ((struct aha1740_hostdata *) &host->hostdata) 88 static inline struct ecb *ecb_dma_to_cpu (struct Scsi_Host *host, ecb_dma_to_cpu() argument 91 struct aha1740_hostdata *hdata = HOSTDATA (host); ecb_dma_to_cpu() 99 static inline dma_addr_t ecb_cpu_to_dma (struct Scsi_Host *host, void *cpu) ecb_cpu_to_dma() argument 101 struct aha1740_hostdata *hdata = HOSTDATA (host); ecb_cpu_to_dma() 111 struct aha1740_hostdata *host = HOSTDATA(shpnt); aha1740_show_info() local 114 shpnt->io_port, shpnt->irq, host->edev->slot, aha1740_show_info() 115 host->translation ? "en" : "dis"); aha1740_show_info() 209 struct Scsi_Host *host = (struct Scsi_Host *) dev_id; aha1740_intr_handle() local 221 if (!host) aha1740_intr_handle() 222 panic("aha1740.c: Irq from unknown host!\n"); aha1740_intr_handle() 223 spin_lock_irqsave(host->host_lock, flags); aha1740_intr_handle() 224 base = host->io_port; aha1740_intr_handle() 226 edev = HOSTDATA(host)->edev; aha1740_intr_handle() 232 ecbptr = ecb_dma_to_cpu (host, inl(MBOXIN0(base))); aha1740_intr_handle() 310 spin_unlock_irqrestore(host->host_lock, flags); aha1740_intr_handle() 319 struct aha1740_hostdata *host = HOSTDATA(SCpnt->device->host); aha1740_queuecommand_lck() local 347 spin_lock_irqsave(SCpnt->device->host->host_lock, flags); aha1740_queuecommand_lck() 348 ecbno = host->last_ecb_used + 1; /* An optimization */ aha1740_queuecommand_lck() 352 if (!host->ecb[ecbno].cmdw) aha1740_queuecommand_lck() 357 } while (ecbno != host->last_ecb_used); aha1740_queuecommand_lck() 359 if (host->ecb[ecbno].cmdw) aha1740_queuecommand_lck() 362 host->ecb[ecbno].cmdw = AHA1740CMD_INIT; /* SCSI Initiator Command aha1740_queuecommand_lck() 365 host->last_ecb_used = ecbno; aha1740_queuecommand_lck() 366 spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags); aha1740_queuecommand_lck() 372 host->ecb[ecbno].cdblen = SCpnt->cmd_len; /* SCSI Command aha1740_queuecommand_lck() 382 memcpy(host->ecb[ecbno].cdb, cmd, SCpnt->cmd_len); aha1740_queuecommand_lck() 384 SCpnt->host_scribble = dma_alloc_coherent (&host->edev->dev, aha1740_queuecommand_lck() 402 host->ecb[ecbno].sg = 1; /* SCSI Initiator Command aha1740_queuecommand_lck() 409 host->ecb[ecbno].datalen = nseg * sizeof(struct aha1740_chain); 410 host->ecb[ecbno].dataptr = sg_dma; 417 host->ecb[ecbno].datalen = 0; 418 host->ecb[ecbno].dataptr = 0; 420 host->ecb[ecbno].lun = SCpnt->device->lun; 421 host->ecb[ecbno].ses = 1; /* Suppress underrun errors */ 422 host->ecb[ecbno].dir = direction; 423 host->ecb[ecbno].ars = 1; /* Yes, get the sense on an error */ 424 host->ecb[ecbno].senselen = 12; 425 host->ecb[ecbno].senseptr = ecb_cpu_to_dma (SCpnt->device->host, 426 host->ecb[ecbno].sense); 427 host->ecb[ecbno].statusptr = ecb_cpu_to_dma (SCpnt->device->host, 428 host->ecb[ecbno].status); 429 host->ecb[ecbno].done = done; 430 host->ecb[ecbno].SCpnt = SCpnt; 435 for (i = 0; i < sizeof(host->ecb[ecbno]) - 10; i++) 436 printk("%02x ", ((unchar *)&host->ecb[ecbno])[i]); 456 unsigned int base = SCpnt->device->host->io_port; 459 spin_lock_irqsave(SCpnt->device->host->host_lock, flags); 468 outl (ecb_cpu_to_dma (SCpnt->device->host, host->ecb + ecbno), 479 spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags); 508 int extended = HOSTDATA(sdev->host)->translation; aha1740_biosparam() 557 struct aha1740_hostdata *host; aha1740_probe() local 588 host = HOSTDATA(shpnt); aha1740_probe() 589 host->edev = edev; aha1740_probe() 590 host->translation = translation; aha1740_probe() 591 
host->ecb_dma_addr = dma_map_single (&edev->dev, host->ecb, aha1740_probe() 592 sizeof (host->ecb), aha1740_probe() 594 if (!host->ecb_dma_addr) { aha1740_probe() 620 dma_unmap_single (&edev->dev, host->ecb_dma_addr, aha1740_probe() 621 sizeof (host->ecb), DMA_BIDIRECTIONAL); aha1740_probe() 633 struct aha1740_hostdata *host = HOSTDATA (shpnt); aha1740_remove() local 638 dma_unmap_single (dev, host->ecb_dma_addr, aha1740_remove() 639 sizeof (host->ecb), DMA_BIDIRECTIONAL); aha1740_remove()
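aha1740 maps its whole ECB array once with dma_map_single() and then converts individual ECBs between CPU pointers and bus addresses by applying the constant offset between the array and its mapped address; the interrupt handler uses ecb_dma_to_cpu(host, inl(MBOXIN0(base))) to turn a mailbox bus address back into a pointer. The helpers below illustrate that translation only; struct my_ecb, MY_NR_ECBS and the field names are placeholders, not the driver's exact definitions.

#include <linux/types.h>

#define MY_NR_ECBS 32			/* assumed ring size, for illustration */

struct my_ecb { u8 raw[64]; };		/* stand-in for the driver's struct ecb */

struct my_hostdata {
	struct my_ecb ecb[MY_NR_ECBS];
	dma_addr_t ecb_dma_addr;	/* bus address of ecb[0] from dma_map_single() */
};

static inline dma_addr_t my_ecb_cpu_to_dma(struct my_hostdata *h, void *cpu)
{
	return h->ecb_dma_addr + ((char *)cpu - (char *)h->ecb);
}

static inline void *my_ecb_dma_to_cpu(struct my_hostdata *h, dma_addr_t dma)
{
	return (char *)h->ecb + (dma - h->ecb_dma_addr);
}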
|
H A D | lasi700.c | 103 struct Scsi_Host *host; lasi700_probe() local 107 dev_printk(KERN_ERR, &dev->dev, "Failed to allocate host data\n"); lasi700_probe() 127 host = NCR_700_detect(&lasi700_template, hostdata, &dev->dev); lasi700_probe() 128 if (!host) lasi700_probe() 130 host->this_id = 7; lasi700_probe() 131 host->base = base; lasi700_probe() 132 host->irq = dev->irq; lasi700_probe() 133 if(request_irq(dev->irq, NCR_700_intr, IRQF_SHARED, "lasi700", host)) { lasi700_probe() 138 dev_set_drvdata(&dev->dev, host); lasi700_probe() 139 scsi_scan_host(host); lasi700_probe() 144 scsi_host_put(host); lasi700_probe() 154 struct Scsi_Host *host = dev_get_drvdata(&dev->dev); lasi700_driver_remove() local 156 (struct NCR_700_Host_Parameters *)host->hostdata[0]; lasi700_driver_remove() 158 scsi_remove_host(host); lasi700_driver_remove() 159 NCR_700_release(host); lasi700_driver_remove() 160 free_irq(host->irq, host); lasi700_driver_remove()
|
H A D | jazz_esp.c | 135 struct Scsi_Host *host; esp_jazz_probe() local 140 host = scsi_host_alloc(tpnt, sizeof(struct esp)); esp_jazz_probe() 143 if (!host) esp_jazz_probe() 146 host->max_id = 8; esp_jazz_probe() 147 esp = shost_priv(host); esp_jazz_probe() 149 esp->host = host; esp_jazz_probe() 173 host->irq = platform_get_irq(dev, 0); esp_jazz_probe() 174 err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp); esp_jazz_probe() 179 esp->host->this_id = esp->scsi_id; esp_jazz_probe() 192 free_irq(host->irq, esp); esp_jazz_probe() 199 scsi_host_put(host); esp_jazz_probe() 207 unsigned int irq = esp->host->irq; esp_jazz_remove() 216 scsi_host_put(esp->host); esp_jazz_remove()
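jazz_esp.c (and sun3x_esp.c below) keep their per-host state inside the Scsi_Host itself: scsi_host_alloc(tpnt, sizeof(struct esp)) reserves the space, and shost_priv(host) returns a pointer into it. A minimal sketch of that convention with an invented struct my_priv:

#include <scsi/scsi_host.h>

struct my_priv {
	void __iomem *regs;
	int scsi_id;
};

static struct Scsi_Host *my_alloc_host(struct scsi_host_template *tpnt)
{
	struct Scsi_Host *host;
	struct my_priv *priv;

	host = scsi_host_alloc(tpnt, sizeof(struct my_priv));
	if (!host)
		return NULL;

	host->max_id = 8;
	priv = shost_priv(host);	/* points into host->hostdata[] */
	priv->scsi_id = 7;
	return host;
}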
|
H A D | zalon.c | 94 struct Scsi_Host *host; zalon_probe() local 135 host = ncr_attach(&zalon7xx_template, unit, &device); zalon_probe() 136 if (!host) zalon_probe() 139 if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) { zalon_probe() 147 dev_set_drvdata(&dev->dev, host); zalon_probe() 149 error = scsi_add_host(host, &dev->dev); zalon_probe() 153 scsi_scan_host(host); zalon_probe() 157 free_irq(dev->irq, host); zalon_probe() 159 ncr53c8xx_release(host); zalon_probe() 172 struct Scsi_Host *host = dev_get_drvdata(&dev->dev); zalon_remove() local 174 scsi_remove_host(host); zalon_remove() 175 ncr53c8xx_release(host); zalon_remove() 176 free_irq(dev->irq, host); zalon_remove()
|
H A D | wd7000.c | 14 * Rewritten to support multiple host adapters. 74 * visible to the host CPU is defined effectively by the Z80's 77 * on-board BIOS is of no interest whatsoever.) The host CPU has 147 * use host->host_lock, not io_request_lock, cleanups 159 * Make host reset actually reset the card 164 * Cleaned up host template definition 202 * more commands, while ICMBs are used by the host adapter per command. 419 * - mb and scbs are required for interfacing with the host adapter. 424 * - An icb is for host-only (non-SCSI) commands. ICBs are 16 bytes each; 467 Adapter *host; /* host adapter */ member in struct:scb 472 * This driver is written to allow host-only commands to be executed. 596 * The SCBs declared here are shared by all host adapters; hence, this 738 static inline void wd7000_enable_intr(Adapter * host) wd7000_enable_intr() argument 740 host->control |= INT_EN; wd7000_enable_intr() 741 outb(host->control, host->iobase + ASC_CONTROL); wd7000_enable_intr() 745 static inline void wd7000_enable_dma(Adapter * host) wd7000_enable_dma() argument 748 host->control |= DMA_EN; wd7000_enable_dma() 749 outb(host->control, host->iobase + ASC_CONTROL); wd7000_enable_dma() 752 set_dma_mode(host->dma, DMA_MODE_CASCADE); wd7000_enable_dma() 753 enable_dma(host->dma); wd7000_enable_dma() 777 static inline int command_out(Adapter * host, unchar * cmd, int len) command_out() argument 779 if (!WAIT(host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0)) { command_out() 782 outb(*cmd, host->iobase + ASC_COMMAND); command_out() 783 WAIT(host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0); command_out() 784 } while (inb(host->iobase + ASC_STAT) & CMD_REJ); command_out() 808 static inline Scb *alloc_scbs(struct Scsi_Host *host, int needed) alloc_scbs() argument 819 spin_unlock_irq(host->host_lock); alloc_scbs() 856 spin_lock_irq(host->host_lock); alloc_scbs() 895 static int mail_out(Adapter * host, Scb * scbptr) mail_out() argument 903 Mailbox *ogmbs = host->mb.ogmb; mail_out() 904 int *next_ogmb = &(host->next_ogmb); mail_out() 909 spin_lock_irqsave(host->sh->host_lock, flags); mail_out() 922 spin_unlock_irqrestore(host->sh->host_lock, flags); mail_out() 939 wd7000_enable_intr(host); mail_out() 942 command_out(host, &start_ogmb, 1); mail_out() 972 case 6: /* Unexpected Command Received w/ host as target */ make_code() 991 dprintk("\nSCSI command error: SCSI 0x%02x host 0x%04x return %d\n", scsierr, in_error, hosterr); make_code() 996 #define wd7000_intr_ack(host) outb (0, host->iobase + ASC_INTR_ACK) 1001 Adapter *host = (Adapter *) dev_id; wd7000_intr() local 1005 IcbAny *icb; /* for host commands */ wd7000_intr() 1007 Mailbox *icmbs = host->mb.icmb; wd7000_intr() 1010 spin_lock_irqsave(host->sh->host_lock, flags); wd7000_intr() 1011 host->int_counter++; wd7000_intr() 1013 dprintk("wd7000_intr: irq = %d, host = 0x%06lx\n", irq, (long) host); wd7000_intr() 1015 flag = inb(host->iobase + ASC_INTR_STAT); wd7000_intr() 1019 if (!(inb(host->iobase + ASC_STAT) & INT_IM)) { wd7000_intr() 1023 * comes out as 7 from the 8259, which is 15 to the host. 
Thus, it wd7000_intr() 1078 wd7000_intr_ack(host); wd7000_intr() 1080 spin_unlock_irqrestore(host->sh->host_lock, flags); wd7000_intr() 1093 Adapter *host = (Adapter *) SCpnt->device->host->hostdata; wd7000_queuecommand_lck() local 1099 scb = alloc_scbs(SCpnt->device->host, 1); wd7000_queuecommand_lck() 1106 scb->host = host; wd7000_queuecommand_lck() 1135 while (!mail_out(host, scb)) 1143 static int wd7000_diagnostics(Adapter * host, int code) wd7000_diagnostics() argument 1158 mail_out(host, (struct scb *) &icb); wd7000_diagnostics() 1178 static int wd7000_adapter_reset(Adapter * host) wd7000_adapter_reset() argument 1183 host->bus_on, wd7000_adapter_reset() 1184 host->bus_off, wd7000_adapter_reset() 1195 outb(ASC_RES, host->iobase + ASC_CONTROL); wd7000_adapter_reset() 1197 outb(0, host->iobase + ASC_CONTROL); wd7000_adapter_reset() 1198 host->control = 0; /* this must always shadow ASC_CONTROL */ wd7000_adapter_reset() 1200 if (WAIT(host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0)) { wd7000_adapter_reset() 1205 if ((diag = inb(host->iobase + ASC_INTR_STAT)) != 1) { wd7000_adapter_reset() 1233 memset(&(host->mb), 0, sizeof(host->mb)); wd7000_adapter_reset() 1236 any2scsi((unchar *) & (init_cmd.mailboxes), (int) &(host->mb)); wd7000_adapter_reset() 1237 if (!command_out(host, (unchar *) & init_cmd, sizeof(init_cmd))) { wd7000_adapter_reset() 1242 if (WAIT(host->iobase + ASC_STAT, ASC_STATMASK, ASC_INIT, 0)) { wd7000_adapter_reset() 1249 static int wd7000_init(Adapter * host) wd7000_init() argument 1251 if (wd7000_adapter_reset(host) == -1) wd7000_init() 1255 if (request_irq(host->irq, wd7000_intr, 0, "wd7000", host)) { wd7000_init() 1256 printk("wd7000_init: can't get IRQ %d.\n", host->irq); wd7000_init() 1259 if (request_dma(host->dma, "wd7000")) { wd7000_init() 1260 printk("wd7000_init: can't get DMA channel %d.\n", host->dma); wd7000_init() 1261 free_irq(host->irq, host); wd7000_init() 1264 wd7000_enable_dma(host); wd7000_init() 1265 wd7000_enable_intr(host); wd7000_init() 1267 if (!wd7000_diagnostics(host, ICB_DIAG_FULL)) { wd7000_init() 1268 free_dma(host->dma); wd7000_init() 1269 free_irq(host->irq, NULL); wd7000_init() 1277 static void wd7000_revision(Adapter * host) wd7000_revision() argument 1288 mail_out(host, (struct scb *) &icb); wd7000_revision() 1293 host->rev1 = icb.primary; wd7000_revision() 1294 host->rev2 = icb.secondary; wd7000_revision() 1298 static int wd7000_set_info(struct Scsi_Host *host, char *buffer, int length) wd7000_set_info() argument 1310 static int wd7000_show_info(struct seq_file *m, struct Scsi_Host *host) wd7000_show_info() argument 1312 Adapter *adapter = (Adapter *)host->hostdata; wd7000_show_info() 1319 spin_lock_irqsave(host->host_lock, flags); wd7000_show_info() 1320 seq_printf(m, "Host scsi%d: Western Digital WD-7000 (rev %d.%d)\n", host->host_no, adapter->rev1, adapter->rev2); wd7000_show_info() 1359 spin_unlock_irqrestore(host->host_lock, flags); wd7000_show_info() 1381 Adapter *host = NULL; wd7000_detect() local 1471 * which we'll use as the Adapter structure (host) for wd7000_detect() 1480 host = (Adapter *) sh->hostdata; wd7000_detect() 1482 dprintk("wd7000_detect: adapter allocated at 0x%x\n", (int) host); wd7000_detect() 1483 memset(host, 0, sizeof(Adapter)); wd7000_detect() 1485 host->irq = configs[pass].irq; wd7000_detect() 1486 host->dma = configs[pass].dma; wd7000_detect() 1487 host->iobase = iobase; wd7000_detect() 1488 host->int_counter = 0; wd7000_detect() 1489 host->bus_on = configs[pass].bus_on; wd7000_detect() 1490 host->bus_off 
= configs[pass].bus_off; wd7000_detect() 1491 host->sh = wd7000_host[unit] = sh; wd7000_detect() 1494 dprintk("wd7000_detect: Trying init WD-7000 card at IO " "0x%x, IRQ %d, DMA %d...\n", host->iobase, host->irq, host->dma); wd7000_detect() 1496 if (!wd7000_init(host)) /* Initialization failed */ wd7000_detect() 1502 wd7000_revision(host); /* important for scatter/gather */ wd7000_detect() 1507 if (host->rev1 < 6) wd7000_detect() 1515 printk(KERN_INFO "Western Digital WD-7000 (rev %d.%d) ", host->rev1, host->rev2); wd7000_detect() 1516 printk("using IO 0x%x, IRQ %d, DMA %d.\n", host->iobase, host->irq, host->dma); wd7000_detect() 1517 printk(" BUS_ON time: %dns, BUS_OFF time: %dns\n", host->bus_on * 125, host->bus_off * 125); wd7000_detect() 1553 Adapter *host = (Adapter *) SCpnt->device->host->hostdata; 1555 if (inb(host->iobase + ASC_STAT) & INT_IM) { 1557 wd7000_intr_handle(host->irq, NULL, NULL); 1570 Adapter *host = (Adapter *) SCpnt->device->host->hostdata; wd7000_host_reset() local 1572 spin_lock_irq(SCpnt->device->host->host_lock); wd7000_host_reset() 1574 if (wd7000_adapter_reset(host) < 0) { wd7000_host_reset() 1575 spin_unlock_irq(SCpnt->device->host->host_lock); wd7000_host_reset() 1579 wd7000_enable_intr(host); wd7000_host_reset() 1581 spin_unlock_irq(SCpnt->device->host->host_lock); wd7000_host_reset()
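command_out() in the wd7000.c listing hand-shakes each command byte with the adapter: wait until the status port shows CMD_RDY, write the byte to the command port, and resend it while the adapter answers with CMD_REJ. The sketch below reconstructs that loop; the listing elides a couple of lines, so the exact loop structure is inferred, the bounded udelay() poll stands in for the driver's WAIT() macro, and the port/bit names are passed in as parameters rather than taken from the driver's headers.

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

/* bounded poll standing in for the driver's WAIT() macro */
static bool my_wait_bits(unsigned int port, u8 mask, u8 want)
{
	int i;

	for (i = 0; i < 10000; i++) {
		if ((inb(port) & mask) == want)
			return true;
		udelay(10);
	}
	return false;
}

/*
 * stat/cmd_port are the ASC_STAT and ASC_COMMAND port addresses from the
 * listing; rdy/rej are the CMD_RDY and CMD_REJ status bits.
 */
static int my_command_out(unsigned int stat, unsigned int cmd_port,
			  u8 statmask, u8 rdy, u8 rej,
			  const u8 *cmd, int len)
{
	while (len--) {
		if (!my_wait_bits(stat, statmask, rdy))
			return 0;		/* adapter never became ready */
		do {
			outb(*cmd, cmd_port);	/* send the byte ... */
			my_wait_bits(stat, statmask, rdy);
		} while (inb(stat) & rej);	/* ... and resend if rejected */
		cmd++;
	}
	return 1;
}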
|
H A D | sim710.c | 101 struct Scsi_Host * host = NULL; sim710_probe_common() local 110 printk(KERN_ERR "sim710: Failed to allocate host data\n"); sim710_probe_common() 128 if((host = NCR_700_detect(&sim710_driver_template, hostdata, dev)) sim710_probe_common() 130 printk(KERN_ERR "sim710: No host detected; card configuration problem?\n"); sim710_probe_common() 133 host->this_id = scsi_id; sim710_probe_common() 134 host->base = base_addr; sim710_probe_common() 135 host->irq = irq; sim710_probe_common() 136 if (request_irq(irq, NCR_700_intr, IRQF_SHARED, "sim710", host)) { sim710_probe_common() 141 dev_set_drvdata(dev, host); sim710_probe_common() 142 scsi_scan_host(host); sim710_probe_common() 147 scsi_host_put(host); sim710_probe_common() 158 struct Scsi_Host *host = dev_get_drvdata(dev); sim710_device_remove() local 160 (struct NCR_700_Host_Parameters *)host->hostdata[0]; sim710_device_remove() 162 scsi_remove_host(host); sim710_device_remove() 163 NCR_700_release(host); sim710_device_remove() 165 free_irq(host->irq, host); sim710_device_remove() 166 release_region(host->base, 64); sim710_device_remove()
|
H A D | scsi_ioctl.c | 34 * ioctl_probe -- return host identification 35 * @host: host to identify 41 static int ioctl_probe(struct Scsi_Host *host, void __user *buffer) ioctl_probe() argument 50 if (host->hostt->info) ioctl_probe() 51 string = host->hostt->info(host); ioctl_probe() 53 string = host->hostt->name; ioctl_probe() 67 * The SCSI_IOCTL_SEND_COMMAND ioctl sends a command out to the SCSI host. 174 struct device *dev = scsi_get_device(sdev->host); scsi_ioctl_get_pci() 227 + ((sdev->host->host_no & 0xff) << 24), scsi_ioctl() 229 __put_user(sdev->host->unique_id, scsi_ioctl() 233 return put_user(sdev->host->host_no, (int __user *)arg); scsi_ioctl() 235 return ioctl_probe(sdev->host, arg); scsi_ioctl() 266 if (sdev->host->hostt->ioctl) scsi_ioctl() 267 return sdev->host->hostt->ioctl(sdev, cmd, arg); scsi_ioctl() 280 if (scsi_host_in_recovery(sdev->host)) scsi_ioctl_block_when_processing_errors()
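The identification lookup in ioctl_probe() above prefers the host template's info() hook and falls back to the template's static name. A sketch of just that selection (the length-checked copy to the user buffer is elided):

#include <scsi/scsi_host.h>

static const char *my_host_ident(struct Scsi_Host *host)
{
	if (host->hostt->info)			/* prefer the live info() hook */
		return host->hostt->info(host);
	return host->hostt->name;		/* fall back to the static name */
}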
|
H A D | sun3x_esp.c | 122 esp->host->unique_id); sun3x_esp_dma_drain() 138 "invalidate!\n", esp->host->unique_id); sun3x_esp_dma_invalidate() 200 struct Scsi_Host *host; esp_sun3x_probe() local 205 host = scsi_host_alloc(tpnt, sizeof(struct esp)); esp_sun3x_probe() 206 if (!host) esp_sun3x_probe() 209 host->max_id = 8; esp_sun3x_probe() 210 esp = shost_priv(host); esp_sun3x_probe() 212 esp->host = host; esp_sun3x_probe() 236 host->irq = platform_get_irq(dev, 0); esp_sun3x_probe() 237 err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, esp_sun3x_probe() 243 esp->host->this_id = esp->scsi_id; esp_sun3x_probe() 256 free_irq(host->irq, esp); esp_sun3x_probe() 266 scsi_host_put(host); esp_sun3x_probe() 274 unsigned int irq = esp->host->irq; esp_sun3x_remove() 288 scsi_host_put(esp->host); esp_sun3x_remove()
|
H A D | xen-scsifront.c | 103 struct Scsi_Host *host; member in struct:vscsifrnt_info 117 /* Following items are protected by the host lock. */ 214 shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME scsifront_gnttab_done() 273 shost_printk(KERN_ERR, info->host, KBUILD_MODNAME scsifront_sync_cmd_done() 325 spin_lock_irqsave(info->host->host_lock, flags); scsifront_cmd_done() 331 spin_unlock_irqrestore(info->host->host_lock, flags); scsifront_cmd_done() 391 if (data_grants > info->host->sg_tablesize) { map_data_for_request() 392 shost_printk(KERN_ERR, info->host, KBUILD_MODNAME map_data_for_request() 408 shost_printk(KERN_ERR, info->host, KBUILD_MODNAME map_data_for_request() 598 struct Scsi_Host *host = sc->device->host; scsifront_action_handler() local 599 struct vscsifrnt_info *info = shost_priv(host); scsifront_action_handler() 608 spin_lock_irq(host->host_lock); scsifront_action_handler() 617 spin_unlock_irq(host->host_lock); scsifront_action_handler() 622 spin_unlock_irq(host->host_lock); scsifront_action_handler() 625 spin_lock_irq(host->host_lock); scsifront_action_handler() 629 spin_unlock_irq(host->host_lock); scsifront_action_handler() 644 spin_unlock_irq(host->host_lock); scsifront_action_handler() 646 spin_lock_irq(host->host_lock); scsifront_action_handler() 660 spin_unlock_irq(host->host_lock); scsifront_action_handler() 678 struct vscsifrnt_info *info = shost_priv(sdev->host); scsifront_sdev_configure() 689 struct vscsifrnt_info *info = shost_priv(sdev->host); scsifront_sdev_destroy() 835 struct Scsi_Host *host; scsifront_probe() local 839 host = scsi_host_alloc(&scsifront_sht, sizeof(*info)); scsifront_probe() 840 if (!host) { scsifront_probe() 841 xenbus_dev_fatal(dev, err, "fail to allocate scsi host"); scsifront_probe() 844 info = (struct vscsifrnt_info *)host->hostdata; scsifront_probe() 853 scsi_host_put(host); scsifront_probe() 861 snprintf(name, TASK_COMM_LEN, "vscsiif.%d", host->host_no); scsifront_probe() 863 host->max_id = VSCSIIF_MAX_TARGET; scsifront_probe() 864 host->max_channel = 0; scsifront_probe() 865 host->max_lun = VSCSIIF_MAX_LUN; scsifront_probe() 866 host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512; scsifront_probe() 867 host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE; scsifront_probe() 869 err = scsi_add_host(host, &dev->dev); scsifront_probe() 871 dev_err(&dev->dev, "fail to add scsi host %d\n", err); scsifront_probe() 874 info->host = host; scsifront_probe() 883 scsi_host_put(host); scsifront_probe() 890 struct Scsi_Host *host = info->host; scsifront_resume() local 893 spin_lock_irq(host->host_lock); scsifront_resume() 898 spin_unlock_irq(host->host_lock); scsifront_resume() 905 scsi_host_put(host); scsifront_resume() 917 struct Scsi_Host *host = info->host; scsifront_suspend() local 921 spin_lock_irq(host->host_lock); scsifront_suspend() 926 spin_unlock_irq(host->host_lock); scsifront_suspend() 930 spin_lock_irq(host->host_lock); scsifront_suspend() 932 spin_unlock_irq(host->host_lock); scsifront_suspend() 945 scsi_remove_host(info->host); scsifront_remove() 951 scsi_host_put(info->host); scsifront_remove() 959 struct Scsi_Host *host = info->host; scsifront_disconnect() local 971 scsi_remove_host(host); scsifront_disconnect() 1026 if (scsi_add_device(info->host, chn, tgt, lun)) { scsifront_do_lun_hotplug() 1037 sdev = scsi_device_lookup(info->host, chn, tgt, lun); scsifront_do_lun_hotplug() 1064 struct Scsi_Host *host = info->host; scsifront_read_backend_params() local 1078 else if (info->pause && nr_segs < host->sg_tablesize) 
scsifront_read_backend_params() 1081 host->sg_tablesize, nr_segs); scsifront_read_backend_params() 1083 host->sg_tablesize = nr_segs; scsifront_read_backend_params() 1084 host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512; scsifront_read_backend_params()
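xen-scsifront sizes requests from the scatter-gather limit: probe sets host->max_sectors to (sg_tablesize - 1) * PAGE_SIZE / 512, and scsifront_read_backend_params() recomputes both values when the backend advertises a different segment count. Worked example, assuming 4 KiB pages and 26 segments: (26 - 1) * 4096 / 512 = 200 sectors, i.e. 100 KiB per request. (The minus one presumably leaves slack for a request that is not page-aligned; the listing itself does not say.) The helper below only restates that arithmetic:

static unsigned int my_max_sectors(unsigned int nr_segs, unsigned long page_size)
{
	/* mirrors (sg_tablesize - 1) * PAGE_SIZE / 512 from the listing */
	return (nr_segs - 1) * page_size / 512;
}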
|
/linux-4.1.27/drivers/mtd/nand/ |
H A D | hisi504_nand.c | 155 static inline unsigned int hinfc_read(struct hinfc_host *host, unsigned int reg) hinfc_read() argument 157 return readl(host->iobase + reg); hinfc_read() 160 static inline void hinfc_write(struct hinfc_host *host, unsigned int value, hinfc_write() argument 163 writel(value, host->iobase + reg); hinfc_write() 166 static void wait_controller_finished(struct hinfc_host *host) wait_controller_finished() argument 172 val = hinfc_read(host, HINFC504_STATUS); wait_controller_finished() 173 if (host->command == NAND_CMD_ERASE2) { wait_controller_finished() 177 val = hinfc_read(host, HINFC504_STATUS); wait_controller_finished() 187 dev_err(host->dev, "Wait NAND controller exec cmd timeout.\n"); wait_controller_finished() 190 static void hisi_nfc_dma_transfer(struct hinfc_host *host, int todev) hisi_nfc_dma_transfer() argument 192 struct mtd_info *mtd = &host->mtd; hisi_nfc_dma_transfer() 197 hinfc_write(host, host->dma_buffer, HINFC504_DMA_ADDR_DATA); hisi_nfc_dma_transfer() 198 hinfc_write(host, host->dma_oob, HINFC504_DMA_ADDR_OOB); hisi_nfc_dma_transfer() 201 hinfc_write(host, ((mtd->oobsize & HINFC504_DMA_LEN_OOB_MASK) hisi_nfc_dma_transfer() 204 hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN hisi_nfc_dma_transfer() 207 if (host->command == NAND_CMD_READOOB) hisi_nfc_dma_transfer() 208 hinfc_write(host, HINFC504_DMA_PARA_OOB_RW_EN hisi_nfc_dma_transfer() 212 hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN hisi_nfc_dma_transfer() 224 | ((host->addr_cycle == 4 ? 1 : 0) hisi_nfc_dma_transfer() 226 | ((host->chipselect & HINFC504_DMA_CTRL_CS_MASK) hisi_nfc_dma_transfer() 232 init_completion(&host->cmd_complete); hisi_nfc_dma_transfer() 234 hinfc_write(host, val, HINFC504_DMA_CTRL); hisi_nfc_dma_transfer() 235 ret = wait_for_completion_timeout(&host->cmd_complete, hisi_nfc_dma_transfer() 239 dev_err(host->dev, "DMA operation(irq) timeout!\n"); hisi_nfc_dma_transfer() 241 val = hinfc_read(host, HINFC504_DMA_CTRL); hisi_nfc_dma_transfer() 243 dev_err(host->dev, "DMA is already done but without irq ACK!\n"); hisi_nfc_dma_transfer() 245 dev_err(host->dev, "DMA is really timeout!\n"); hisi_nfc_dma_transfer() 249 static int hisi_nfc_send_cmd_pageprog(struct hinfc_host *host) hisi_nfc_send_cmd_pageprog() argument 251 host->addr_value[0] &= 0xffff0000; hisi_nfc_send_cmd_pageprog() 253 hinfc_write(host, host->addr_value[0], HINFC504_ADDRL); hisi_nfc_send_cmd_pageprog() 254 hinfc_write(host, host->addr_value[1], HINFC504_ADDRH); hisi_nfc_send_cmd_pageprog() 255 hinfc_write(host, NAND_CMD_PAGEPROG << 8 | NAND_CMD_SEQIN, hisi_nfc_send_cmd_pageprog() 258 hisi_nfc_dma_transfer(host, 1); hisi_nfc_send_cmd_pageprog() 263 static int hisi_nfc_send_cmd_readstart(struct hinfc_host *host) hisi_nfc_send_cmd_readstart() argument 265 struct mtd_info *mtd = &host->mtd; hisi_nfc_send_cmd_readstart() 267 if ((host->addr_value[0] == host->cache_addr_value[0]) && hisi_nfc_send_cmd_readstart() 268 (host->addr_value[1] == host->cache_addr_value[1])) hisi_nfc_send_cmd_readstart() 271 host->addr_value[0] &= 0xffff0000; hisi_nfc_send_cmd_readstart() 273 hinfc_write(host, host->addr_value[0], HINFC504_ADDRL); hisi_nfc_send_cmd_readstart() 274 hinfc_write(host, host->addr_value[1], HINFC504_ADDRH); hisi_nfc_send_cmd_readstart() 275 hinfc_write(host, NAND_CMD_READSTART << 8 | NAND_CMD_READ0, hisi_nfc_send_cmd_readstart() 278 hinfc_write(host, 0, HINFC504_LOG_READ_ADDR); hisi_nfc_send_cmd_readstart() 279 hinfc_write(host, mtd->writesize + mtd->oobsize, hisi_nfc_send_cmd_readstart() 282 
hisi_nfc_dma_transfer(host, 0); hisi_nfc_send_cmd_readstart() 284 host->cache_addr_value[0] = host->addr_value[0]; hisi_nfc_send_cmd_readstart() 285 host->cache_addr_value[1] = host->addr_value[1]; hisi_nfc_send_cmd_readstart() 290 static int hisi_nfc_send_cmd_erase(struct hinfc_host *host) hisi_nfc_send_cmd_erase() argument 292 hinfc_write(host, host->addr_value[0], HINFC504_ADDRL); hisi_nfc_send_cmd_erase() 293 hinfc_write(host, (NAND_CMD_ERASE2 << 8) | NAND_CMD_ERASE1, hisi_nfc_send_cmd_erase() 296 hinfc_write(host, HINFC504_OP_WAIT_READY_EN hisi_nfc_send_cmd_erase() 300 | ((host->chipselect & HINFC504_OP_NF_CS_MASK) hisi_nfc_send_cmd_erase() 302 | ((host->addr_cycle & HINFC504_OP_ADDR_CYCLE_MASK) hisi_nfc_send_cmd_erase() 306 wait_controller_finished(host); hisi_nfc_send_cmd_erase() 311 static int hisi_nfc_send_cmd_readid(struct hinfc_host *host) hisi_nfc_send_cmd_readid() argument 313 hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM); hisi_nfc_send_cmd_readid() 314 hinfc_write(host, NAND_CMD_READID, HINFC504_CMD); hisi_nfc_send_cmd_readid() 315 hinfc_write(host, 0, HINFC504_ADDRL); hisi_nfc_send_cmd_readid() 317 hinfc_write(host, HINFC504_OP_CMD1_EN | HINFC504_OP_ADDR_EN hisi_nfc_send_cmd_readid() 319 | ((host->chipselect & HINFC504_OP_NF_CS_MASK) hisi_nfc_send_cmd_readid() 323 wait_controller_finished(host); hisi_nfc_send_cmd_readid() 328 static int hisi_nfc_send_cmd_status(struct hinfc_host *host) hisi_nfc_send_cmd_status() argument 330 hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM); hisi_nfc_send_cmd_status() 331 hinfc_write(host, NAND_CMD_STATUS, HINFC504_CMD); hisi_nfc_send_cmd_status() 332 hinfc_write(host, HINFC504_OP_CMD1_EN hisi_nfc_send_cmd_status() 334 | ((host->chipselect & HINFC504_OP_NF_CS_MASK) hisi_nfc_send_cmd_status() 338 wait_controller_finished(host); hisi_nfc_send_cmd_status() 343 static int hisi_nfc_send_cmd_reset(struct hinfc_host *host, int chipselect) hisi_nfc_send_cmd_reset() argument 345 hinfc_write(host, NAND_CMD_RESET, HINFC504_CMD); hisi_nfc_send_cmd_reset() 347 hinfc_write(host, HINFC504_OP_CMD1_EN hisi_nfc_send_cmd_reset() 353 wait_controller_finished(host); hisi_nfc_send_cmd_reset() 361 struct hinfc_host *host = chip->priv; hisi_nfc_select_chip() local 366 host->chipselect = chipselect; hisi_nfc_select_chip() 372 struct hinfc_host *host = chip->priv; hisi_nfc_read_byte() local 374 if (host->command == NAND_CMD_STATUS) hisi_nfc_read_byte() 375 return *(uint8_t *)(host->mmio); hisi_nfc_read_byte() 377 host->offset++; hisi_nfc_read_byte() 379 if (host->command == NAND_CMD_READID) hisi_nfc_read_byte() 380 return *(uint8_t *)(host->mmio + host->offset - 1); hisi_nfc_read_byte() 382 return *(uint8_t *)(host->buffer + host->offset - 1); hisi_nfc_read_byte() 388 struct hinfc_host *host = chip->priv; hisi_nfc_read_word() local 390 host->offset += 2; hisi_nfc_read_word() 391 return *(u16 *)(host->buffer + host->offset - 2); hisi_nfc_read_word() 398 struct hinfc_host *host = chip->priv; hisi_nfc_write_buf() local 400 memcpy(host->buffer + host->offset, buf, len); hisi_nfc_write_buf() 401 host->offset += len; hisi_nfc_write_buf() 407 struct hinfc_host *host = chip->priv; hisi_nfc_read_buf() local 409 memcpy(buf, host->buffer + host->offset, len); hisi_nfc_read_buf() 410 host->offset += len; hisi_nfc_read_buf() 416 struct hinfc_host *host = chip->priv; set_addr() local 417 unsigned int command = host->command; set_addr() 419 host->addr_cycle = 0; set_addr() 420 host->addr_value[0] = 0; set_addr() 421 host->addr_value[1] = 0; set_addr() 430 
host->addr_value[0] = column & 0xffff; set_addr() 431 host->addr_cycle = 2; set_addr() 434 host->addr_value[0] |= (page_addr & 0xffff) set_addr() 435 << (host->addr_cycle * 8); set_addr() 436 host->addr_cycle += 2; set_addr() 439 host->addr_cycle += 1; set_addr() 440 if (host->command == NAND_CMD_ERASE1) set_addr() 441 host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16; set_addr() 443 host->addr_value[1] |= ((page_addr >> 16) & 0xff); set_addr() 452 struct hinfc_host *host = chip->priv; hisi_nfc_cmdfunc() local 456 host->command = command; hisi_nfc_cmdfunc() 462 host->offset = column; hisi_nfc_cmdfunc() 464 host->offset = column + mtd->writesize; hisi_nfc_cmdfunc() 468 hisi_nfc_send_cmd_readstart(host); hisi_nfc_cmdfunc() 472 host->offset = column; hisi_nfc_cmdfunc() 481 hisi_nfc_send_cmd_pageprog(host); hisi_nfc_cmdfunc() 485 hisi_nfc_send_cmd_erase(host); hisi_nfc_cmdfunc() 489 host->offset = column; hisi_nfc_cmdfunc() 490 memset(host->mmio, 0, 0x10); hisi_nfc_cmdfunc() 491 hisi_nfc_send_cmd_readid(host); hisi_nfc_cmdfunc() 495 flag = hinfc_read(host, HINFC504_CON); hisi_nfc_cmdfunc() 497 hinfc_write(host, hisi_nfc_cmdfunc() 501 host->offset = 0; hisi_nfc_cmdfunc() 502 memset(host->mmio, 0, 0x10); hisi_nfc_cmdfunc() 503 hisi_nfc_send_cmd_status(host); hisi_nfc_cmdfunc() 504 hinfc_write(host, flag, HINFC504_CON); hisi_nfc_cmdfunc() 508 hisi_nfc_send_cmd_reset(host, host->chipselect); hisi_nfc_cmdfunc() 512 dev_err(host->dev, "Error: unsupported cmd(cmd=%x, col=%x, page=%x)\n", hisi_nfc_cmdfunc() 517 host->cache_addr_value[0] = ~0; hisi_nfc_cmdfunc() 518 host->cache_addr_value[1] = ~0; hisi_nfc_cmdfunc() 524 struct hinfc_host *host = devid; hinfc_irq_handle() local 527 flag = hinfc_read(host, HINFC504_INTS); hinfc_irq_handle() 529 host->irq_status |= flag; hinfc_irq_handle() 532 hinfc_write(host, HINFC504_INTCLR_DMA, HINFC504_INTCLR); hinfc_irq_handle() 533 complete(&host->cmd_complete); hinfc_irq_handle() 535 hinfc_write(host, HINFC504_INTCLR_CE, HINFC504_INTCLR); hinfc_irq_handle() 537 hinfc_write(host, HINFC504_INTCLR_UE, HINFC504_INTCLR); hinfc_irq_handle() 546 struct hinfc_host *host = chip->priv; hisi_nand_read_page_hwecc() local 554 if (host->irq_status & HINFC504_INTS_UE) { hisi_nand_read_page_hwecc() 556 } else if (host->irq_status & HINFC504_INTS_CE) { hisi_nand_read_page_hwecc() 560 status_ecc = hinfc_read(host, HINFC504_ECC_STATUS) >> hisi_nand_read_page_hwecc() 570 host->irq_status = 0; hisi_nand_read_page_hwecc() 578 struct hinfc_host *host = chip->priv; hisi_nand_read_oob() local 583 if (host->irq_status & HINFC504_INTS_UE) { hisi_nand_read_oob() 584 host->irq_status = 0; hisi_nand_read_oob() 588 host->irq_status = 0; hisi_nand_read_oob() 602 static void hisi_nfc_host_init(struct hinfc_host *host) hisi_nfc_host_init() argument 604 struct nand_chip *chip = &host->chip; hisi_nfc_host_init() 607 host->version = hinfc_read(host, HINFC_VERSION); hisi_nfc_host_init() 608 host->addr_cycle = 0; hisi_nfc_host_init() 609 host->addr_value[0] = 0; hisi_nfc_host_init() 610 host->addr_value[1] = 0; hisi_nfc_host_init() 611 host->cache_addr_value[0] = ~0; hisi_nfc_host_init() 612 host->cache_addr_value[1] = ~0; hisi_nfc_host_init() 613 host->chipselect = 0; hisi_nfc_host_init() 623 hinfc_write(host, flag, HINFC504_CON); hisi_nfc_host_init() 625 memset(host->mmio, 0xff, HINFC504_BUFFER_BASE_ADDRESS_LEN); hisi_nfc_host_init() 627 hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH, hisi_nfc_host_init() 631 hinfc_write(host, HINFC504_INTEN_DMA, HINFC504_INTEN); hisi_nfc_host_init() 
639 static int hisi_nfc_ecc_probe(struct hinfc_host *host) hisi_nfc_ecc_probe() argument 643 struct device *dev = host->dev; hisi_nfc_ecc_probe() 644 struct nand_chip *chip = &host->chip; hisi_nfc_ecc_probe() 645 struct mtd_info *mtd = &host->mtd; hisi_nfc_ecc_probe() 646 struct device_node *np = host->dev->of_node; hisi_nfc_ecc_probe() 683 flag = hinfc_read(host, HINFC504_CON); hisi_nfc_ecc_probe() 687 hinfc_write(host, flag, HINFC504_CON); hisi_nfc_ecc_probe() 690 flag = hinfc_read(host, HINFC504_INTEN) & 0xfff; hisi_nfc_ecc_probe() 691 hinfc_write(host, flag | HINFC504_INTEN_UE | HINFC504_INTEN_CE, hisi_nfc_ecc_probe() 701 struct hinfc_host *host; hisi_nfc_probe() local 708 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); hisi_nfc_probe() 709 if (!host) hisi_nfc_probe() 711 host->dev = dev; hisi_nfc_probe() 713 platform_set_drvdata(pdev, host); hisi_nfc_probe() 714 chip = &host->chip; hisi_nfc_probe() 715 mtd = &host->mtd; hisi_nfc_probe() 725 host->iobase = devm_ioremap_resource(dev, res); hisi_nfc_probe() 726 if (IS_ERR(host->iobase)) { hisi_nfc_probe() 727 ret = PTR_ERR(host->iobase); hisi_nfc_probe() 732 host->mmio = devm_ioremap_resource(dev, res); hisi_nfc_probe() 733 if (IS_ERR(host->mmio)) { hisi_nfc_probe() 734 ret = PTR_ERR(host->mmio); hisi_nfc_probe() 744 chip->priv = host; hisi_nfc_probe() 759 hisi_nfc_host_init(host); hisi_nfc_probe() 761 ret = devm_request_irq(dev, irq, hinfc_irq_handle, 0x0, "nandc", host); hisi_nfc_probe() 773 host->buffer = dmam_alloc_coherent(dev, mtd->writesize + mtd->oobsize, hisi_nfc_probe() 774 &host->dma_buffer, GFP_KERNEL); hisi_nfc_probe() 775 if (!host->buffer) { hisi_nfc_probe() 780 host->dma_oob = host->dma_buffer + mtd->writesize; hisi_nfc_probe() 781 memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize); hisi_nfc_probe() 783 flag = hinfc_read(host, HINFC504_CON); hisi_nfc_probe() 797 hinfc_write(host, flag, HINFC504_CON); hisi_nfc_probe() 800 hisi_nfc_ecc_probe(host); hisi_nfc_probe() 825 struct hinfc_host *host = platform_get_drvdata(pdev); hisi_nfc_remove() local 826 struct mtd_info *mtd = &host->mtd; hisi_nfc_remove() 836 struct hinfc_host *host = dev_get_drvdata(dev); hisi_nfc_suspend() local 840 if (((hinfc_read(host, HINFC504_STATUS) & 0x1) == 0x0) && hisi_nfc_suspend() 841 (hinfc_read(host, HINFC504_DMA_CTRL) & hisi_nfc_suspend() 848 dev_err(host->dev, "nand controller suspend timeout.\n"); hisi_nfc_suspend() 856 struct hinfc_host *host = dev_get_drvdata(dev); hisi_nfc_resume() local 857 struct nand_chip *chip = &host->chip; hisi_nfc_resume() 860 hisi_nfc_send_cmd_reset(host, cs); hisi_nfc_resume() 861 hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH, hisi_nfc_resume()
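set_addr() in the hisi504_nand.c listing packs the NAND column and row addresses into the two HINFC504_ADDRL/ADDRH words while counting address cycles: two column cycles in the low half of word 0, two row cycles shifted above them, and a third row byte that lands in word 0 for erase (no column phase) or in word 1 otherwise. A simplified, self-contained model of that packing (the 16-bit bus-width column adjustment and the >128 MiB chip-size check are reduced to parameters):

#include <linux/types.h>

/*
 * column < 0 / page_addr < 0 mean "no column / no row phase", matching the
 * driver's -1 convention; large_chip mirrors its extra-row-cycle case.
 */
static void my_pack_addr(u32 addr_value[2], unsigned int *addr_cycle,
			 int column, int page_addr, bool erase, bool large_chip)
{
	*addr_cycle = 0;
	addr_value[0] = 0;
	addr_value[1] = 0;

	if (column >= 0) {			/* two column (byte-offset) cycles */
		addr_value[0] = column & 0xffff;
		*addr_cycle = 2;
	}
	if (page_addr >= 0) {			/* two row cycles, packed above the column */
		addr_value[0] |= (page_addr & 0xffff) << (*addr_cycle * 8);
		*addr_cycle += 2;
		if (large_chip) {		/* third row byte for big devices */
			*addr_cycle += 1;
			if (erase)		/* no column phase, so it still fits in word 0 */
				addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
			else
				addr_value[1] |= (page_addr >> 16) & 0xff;
		}
	}
}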
|
H A D | atmel_nand.c | 165 static void atmel_nand_enable(struct atmel_nand_host *host) atmel_nand_enable() argument 167 if (gpio_is_valid(host->board.enable_pin)) atmel_nand_enable() 168 gpio_set_value(host->board.enable_pin, 0); atmel_nand_enable() 174 static void atmel_nand_disable(struct atmel_nand_host *host) atmel_nand_disable() argument 176 if (gpio_is_valid(host->board.enable_pin)) atmel_nand_disable() 177 gpio_set_value(host->board.enable_pin, 1); atmel_nand_disable() 186 struct atmel_nand_host *host = nand_chip->priv; atmel_nand_cmd_ctrl() local 190 atmel_nand_enable(host); atmel_nand_cmd_ctrl() 192 atmel_nand_disable(host); atmel_nand_cmd_ctrl() 198 writeb(cmd, host->io_base + (1 << host->board.cle)); atmel_nand_cmd_ctrl() 200 writeb(cmd, host->io_base + (1 << host->board.ale)); atmel_nand_cmd_ctrl() 209 struct atmel_nand_host *host = nand_chip->priv; atmel_nand_device_ready() local 211 return gpio_get_value(host->board.rdy_pin) ^ atmel_nand_device_ready() 212 !!host->board.rdy_pin_active_low; atmel_nand_device_ready() 219 struct atmel_nand_host *host = chip->priv; atmel_nand_set_enable_ready_pins() local 222 if (gpio_is_valid(host->board.rdy_pin)) { atmel_nand_set_enable_ready_pins() 223 res = devm_gpio_request(host->dev, atmel_nand_set_enable_ready_pins() 224 host->board.rdy_pin, "nand_rdy"); atmel_nand_set_enable_ready_pins() 226 dev_err(host->dev, atmel_nand_set_enable_ready_pins() 228 host->board.rdy_pin); atmel_nand_set_enable_ready_pins() 232 res = gpio_direction_input(host->board.rdy_pin); atmel_nand_set_enable_ready_pins() 234 dev_err(host->dev, atmel_nand_set_enable_ready_pins() 236 host->board.rdy_pin); atmel_nand_set_enable_ready_pins() 243 if (gpio_is_valid(host->board.enable_pin)) { atmel_nand_set_enable_ready_pins() 244 res = devm_gpio_request(host->dev, atmel_nand_set_enable_ready_pins() 245 host->board.enable_pin, "nand_enable"); atmel_nand_set_enable_ready_pins() 247 dev_err(host->dev, atmel_nand_set_enable_ready_pins() 249 host->board.enable_pin); atmel_nand_set_enable_ready_pins() 253 res = gpio_direction_output(host->board.enable_pin, 1); atmel_nand_set_enable_ready_pins() 255 dev_err(host->dev, atmel_nand_set_enable_ready_pins() 257 host->board.enable_pin); atmel_nand_set_enable_ready_pins() 271 struct atmel_nand_host *host = nand_chip->priv; atmel_read_buf8() local 273 if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) { atmel_read_buf8() 274 memcpy(buf, host->nfc->data_in_sram, len); atmel_read_buf8() 275 host->nfc->data_in_sram += len; atmel_read_buf8() 284 struct atmel_nand_host *host = nand_chip->priv; atmel_read_buf16() local 286 if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) { atmel_read_buf16() 287 memcpy(buf, host->nfc->data_in_sram, len); atmel_read_buf16() 288 host->nfc->data_in_sram += len; atmel_read_buf16() 313 static int nfc_set_sram_bank(struct atmel_nand_host *host, unsigned int bank) nfc_set_sram_bank() argument 321 if (host->mtd.writesize > 2048) nfc_set_sram_bank() 323 nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK1); nfc_set_sram_bank() 325 nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK0); nfc_set_sram_bank() 331 static uint nfc_get_sram_off(struct atmel_nand_host *host) nfc_get_sram_off() argument 333 if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1) nfc_get_sram_off() 339 static dma_addr_t nfc_sram_phys(struct atmel_nand_host *host) nfc_sram_phys() argument 341 if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1) nfc_sram_phys() 342 return 
host->nfc->sram_bank0_phys + NFC_SRAM_BANK1_OFFSET; nfc_sram_phys() 344 return host->nfc->sram_bank0_phys; nfc_sram_phys() 356 struct atmel_nand_host *host = chip->priv; atmel_nand_dma_op() local 360 struct atmel_nfc *nfc = host->nfc; atmel_nand_dma_op() 365 dma_dev = host->dma_chan->device; atmel_nand_dma_op() 371 dev_err(host->dev, "Failed to dma_map_single\n"); atmel_nand_dma_op() 377 dma_src_addr = nfc_sram_phys(host) + (nfc->data_in_sram atmel_nand_dma_op() 378 - (nfc->sram_bank0 + nfc_get_sram_off(host))); atmel_nand_dma_op() 380 dma_src_addr = host->io_phys; atmel_nand_dma_op() 387 dma_dst_addr = nfc_sram_phys(host); atmel_nand_dma_op() 389 dma_dst_addr = host->io_phys; atmel_nand_dma_op() 392 tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr, atmel_nand_dma_op() 395 dev_err(host->dev, "Failed to prepare DMA memcpy\n"); atmel_nand_dma_op() 399 init_completion(&host->comp); atmel_nand_dma_op() 401 tx->callback_param = &host->comp; atmel_nand_dma_op() 405 dev_err(host->dev, "Failed to do DMA tx_submit\n"); atmel_nand_dma_op() 409 dma_async_issue_pending(host->dma_chan); atmel_nand_dma_op() 410 wait_for_completion(&host->comp); atmel_nand_dma_op() 422 dev_dbg(host->dev, "Fall back to CPU I/O\n"); atmel_nand_dma_op() 429 struct atmel_nand_host *host = chip->priv; atmel_read_buf() local 436 if (host->board.bus_width_16) atmel_read_buf() 445 struct atmel_nand_host *host = chip->priv; atmel_write_buf() local 452 if (host->board.bus_width_16) atmel_write_buf() 493 static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host) pmecc_get_alpha_to() argument 497 table_size = host->pmecc_sector_size == 512 ? pmecc_get_alpha_to() 500 return host->pmecc_rom_base + host->pmecc_lookup_table_offset + pmecc_get_alpha_to() 504 static int pmecc_data_alloc(struct atmel_nand_host *host) pmecc_data_alloc() argument 506 const int cap = host->pmecc_corr_cap; pmecc_data_alloc() 510 host->pmecc_partial_syn = devm_kzalloc(host->dev, size, GFP_KERNEL); pmecc_data_alloc() 511 host->pmecc_si = devm_kzalloc(host->dev, size, GFP_KERNEL); pmecc_data_alloc() 512 host->pmecc_lmu = devm_kzalloc(host->dev, pmecc_data_alloc() 514 host->pmecc_smu = devm_kzalloc(host->dev, pmecc_data_alloc() 518 host->pmecc_mu = devm_kzalloc(host->dev, size, GFP_KERNEL); pmecc_data_alloc() 519 host->pmecc_dmu = devm_kzalloc(host->dev, size, GFP_KERNEL); pmecc_data_alloc() 520 host->pmecc_delta = devm_kzalloc(host->dev, size, GFP_KERNEL); pmecc_data_alloc() 522 if (!host->pmecc_partial_syn || pmecc_data_alloc() 523 !host->pmecc_si || pmecc_data_alloc() 524 !host->pmecc_lmu || pmecc_data_alloc() 525 !host->pmecc_smu || pmecc_data_alloc() 526 !host->pmecc_mu || pmecc_data_alloc() 527 !host->pmecc_dmu || pmecc_data_alloc() 528 !host->pmecc_delta) pmecc_data_alloc() 537 struct atmel_nand_host *host = nand_chip->priv; pmecc_gen_syndrome() local 542 for (i = 0; i < host->pmecc_corr_cap; i++) { pmecc_gen_syndrome() 543 value = pmecc_readl_rem_relaxed(host->ecc, sector, i / 2); pmecc_gen_syndrome() 547 host->pmecc_partial_syn[(2 * i) + 1] = (int16_t)value; pmecc_gen_syndrome() 554 struct atmel_nand_host *host = nand_chip->priv; pmecc_substitute() local 555 int16_t __iomem *alpha_to = host->pmecc_alpha_to; pmecc_substitute() 556 int16_t __iomem *index_of = host->pmecc_index_of; pmecc_substitute() 557 int16_t *partial_syn = host->pmecc_partial_syn; pmecc_substitute() 558 const int cap = host->pmecc_corr_cap; pmecc_substitute() 565 si = host->pmecc_si; pmecc_substitute() 572 for (j = 0; j < host->pmecc_degree; j++) { 
pmecc_substitute() 585 tmp = (tmp * 2) % host->pmecc_cw_len; pmecc_substitute() 596 struct atmel_nand_host *host = nand_chip->priv; pmecc_get_sigma() local 598 int16_t *lmu = host->pmecc_lmu; pmecc_get_sigma() 599 int16_t *si = host->pmecc_si; pmecc_get_sigma() 600 int *mu = host->pmecc_mu; pmecc_get_sigma() 601 int *dmu = host->pmecc_dmu; /* Discrepancy */ pmecc_get_sigma() 602 int *delta = host->pmecc_delta; /* Delta order */ pmecc_get_sigma() 603 int cw_len = host->pmecc_cw_len; pmecc_get_sigma() 604 const int16_t cap = host->pmecc_corr_cap; pmecc_get_sigma() 606 int16_t __iomem *index_of = host->pmecc_index_of; pmecc_get_sigma() 607 int16_t __iomem *alpha_to = host->pmecc_alpha_to; pmecc_get_sigma() 610 int16_t *smu = host->pmecc_smu; pmecc_get_sigma() 754 struct atmel_nand_host *host = nand_chip->priv; pmecc_err_location() local 756 const int cap = host->pmecc_corr_cap; pmecc_err_location() 758 int sector_size = host->pmecc_sector_size; pmecc_err_location() 763 int16_t *smu = host->pmecc_smu; pmecc_err_location() 765 pmerrloc_writel(host->pmerrloc_base, ELDIS, PMERRLOC_DISABLE); pmecc_err_location() 767 for (i = 0; i <= host->pmecc_lmu[cap + 1] >> 1; i++) { pmecc_err_location() 768 pmerrloc_writel_sigma_relaxed(host->pmerrloc_base, i, pmecc_err_location() 777 pmerrloc_writel(host->pmerrloc_base, ELCFG, val); pmecc_err_location() 778 pmerrloc_writel(host->pmerrloc_base, ELEN, pmecc_err_location() 779 sector_size * 8 + host->pmecc_degree * cap); pmecc_err_location() 782 while (!(pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR) pmecc_err_location() 785 dev_err(host->dev, "PMECC: Timeout to calculate error location.\n"); pmecc_err_location() 791 roots_nbr = (pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR) pmecc_err_location() 794 if (roots_nbr == host->pmecc_lmu[cap + 1] >> 1) pmecc_err_location() 806 struct atmel_nand_host *host = nand_chip->priv; pmecc_correct_data() local 812 sector_size = host->pmecc_sector_size; pmecc_correct_data() 815 tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_base, i) - 1; pmecc_correct_data() 826 pos = sector_num * host->pmecc_sector_size + byte_pos; pmecc_correct_data() 827 dev_info(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", pmecc_correct_data() 837 dev_info(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", pmecc_correct_data() 852 struct atmel_nand_host *host = nand_chip->priv; pmecc_correction() local 858 if (host->caps->pmecc_correct_erase_page) pmecc_correction() 871 buf_pos = buf + i * host->pmecc_sector_size; pmecc_correction() 879 dev_err(host->dev, "PMECC: Too many errors\n"); pmecc_correction() 895 static void pmecc_enable(struct atmel_nand_host *host, int ecc_op) pmecc_enable() argument 900 dev_err(host->dev, "atmel_nand: wrong pmecc operation type!"); pmecc_enable() 904 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST); pmecc_enable() 905 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); pmecc_enable() 906 val = pmecc_readl_relaxed(host->ecc, CFG); pmecc_enable() 909 pmecc_writel(host->ecc, CFG, (val & ~PMECC_CFG_WRITE_OP) pmecc_enable() 912 pmecc_writel(host->ecc, CFG, (val | PMECC_CFG_WRITE_OP) pmecc_enable() 915 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE); pmecc_enable() 916 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA); pmecc_enable() 922 struct atmel_nand_host *host = chip->priv; atmel_nand_pmecc_read_page() local 930 if (!host->nfc || !host->nfc->use_nfc_sram) atmel_nand_pmecc_read_page() 931 pmecc_enable(host, NAND_ECC_READ); atmel_nand_pmecc_read_page() 937 
while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) { atmel_nand_pmecc_read_page() 939 dev_err(host->dev, "PMECC: Timeout to get error status.\n"); atmel_nand_pmecc_read_page() 945 stat = pmecc_readl_relaxed(host->ecc, ISR); atmel_nand_pmecc_read_page() 959 struct atmel_nand_host *host = chip->priv; atmel_nand_pmecc_write_page() local 964 if (!host->nfc || !host->nfc->write_by_sram) { atmel_nand_pmecc_write_page() 965 pmecc_enable(host, NAND_ECC_WRITE); atmel_nand_pmecc_write_page() 970 while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) { atmel_nand_pmecc_write_page() 972 dev_err(host->dev, "PMECC: Timeout to get ECC value.\n"); atmel_nand_pmecc_write_page() 984 pmecc_readb_ecc_relaxed(host->ecc, i, j); atmel_nand_pmecc_write_page() 995 struct atmel_nand_host *host = nand_chip->priv; atmel_pmecc_core_init() local 999 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST); atmel_pmecc_core_init() 1000 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); atmel_pmecc_core_init() 1002 switch (host->pmecc_corr_cap) { atmel_pmecc_core_init() 1020 if (host->pmecc_sector_size == 512) atmel_pmecc_core_init() 1022 else if (host->pmecc_sector_size == 1024) atmel_pmecc_core_init() 1042 pmecc_writel(host->ecc, CFG, val); atmel_pmecc_core_init() 1045 pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1); atmel_pmecc_core_init() 1046 pmecc_writel(host->ecc, SADDR, ecc_layout->eccpos[0]); atmel_pmecc_core_init() 1047 pmecc_writel(host->ecc, EADDR, atmel_pmecc_core_init() 1050 pmecc_writel(host->ecc, CLK, 2); atmel_pmecc_core_init() 1051 pmecc_writel(host->ecc, IDR, 0xff); atmel_pmecc_core_init() 1052 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE); atmel_pmecc_core_init() 1062 static int pmecc_choose_ecc(struct atmel_nand_host *host, pmecc_choose_ecc() argument 1066 if (host->nand_chip.ecc_strength_ds) { pmecc_choose_ecc() 1067 *cap = host->nand_chip.ecc_strength_ds; pmecc_choose_ecc() 1068 *sector_size = host->nand_chip.ecc_step_ds; pmecc_choose_ecc() 1069 dev_info(host->dev, "minimum ECC: %d bits in %d bytes\n", pmecc_choose_ecc() 1074 dev_info(host->dev, "can't detect min. ECC, assume 2 bits in 512 bytes\n"); pmecc_choose_ecc() 1078 if (host->pmecc_corr_cap == 0) { pmecc_choose_ecc() 1081 host->pmecc_corr_cap = 2; pmecc_choose_ecc() 1083 host->pmecc_corr_cap = 4; pmecc_choose_ecc() 1085 host->pmecc_corr_cap = 8; pmecc_choose_ecc() 1087 host->pmecc_corr_cap = 12; pmecc_choose_ecc() 1089 host->pmecc_corr_cap = 24; pmecc_choose_ecc() 1093 if (host->pmecc_sector_size == 0) { pmecc_choose_ecc() 1096 host->pmecc_sector_size = 1024; pmecc_choose_ecc() 1098 host->pmecc_sector_size = 512; pmecc_choose_ecc() 1159 struct atmel_nand_host *host) atmel_pmecc_nand_init_params() 1161 struct mtd_info *mtd = &host->mtd; atmel_pmecc_nand_init_params() 1162 struct nand_chip *nand_chip = &host->nand_chip; atmel_pmecc_nand_init_params() 1167 err_no = pmecc_choose_ecc(host, &cap, §or_size); atmel_pmecc_nand_init_params() 1169 dev_err(host->dev, "The NAND flash's ECC requirement are not support!"); atmel_pmecc_nand_init_params() 1173 if (cap > host->pmecc_corr_cap || atmel_pmecc_nand_init_params() 1174 sector_size != host->pmecc_sector_size) atmel_pmecc_nand_init_params() 1175 dev_info(host->dev, "WARNING: Be Caution! Using different PMECC parameters from Nand ONFI ECC reqirement.\n"); atmel_pmecc_nand_init_params() 1177 cap = host->pmecc_corr_cap; atmel_pmecc_nand_init_params() 1178 sector_size = host->pmecc_sector_size; atmel_pmecc_nand_init_params() 1179 host->pmecc_lookup_table_offset = (sector_size == 512) ? 
atmel_pmecc_nand_init_params() 1180 host->pmecc_lookup_table_offset_512 : atmel_pmecc_nand_init_params() 1181 host->pmecc_lookup_table_offset_1024; atmel_pmecc_nand_init_params() 1183 dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n", atmel_pmecc_nand_init_params() 1188 dev_warn(host->dev, atmel_pmecc_nand_init_params() 1194 host->ecc = devm_ioremap_resource(&pdev->dev, regs); atmel_pmecc_nand_init_params() 1195 if (IS_ERR(host->ecc)) { atmel_pmecc_nand_init_params() 1196 err_no = PTR_ERR(host->ecc); atmel_pmecc_nand_init_params() 1201 host->pmerrloc_base = devm_ioremap_resource(&pdev->dev, regs_pmerr); atmel_pmecc_nand_init_params() 1202 if (IS_ERR(host->pmerrloc_base)) { atmel_pmecc_nand_init_params() 1203 err_no = PTR_ERR(host->pmerrloc_base); atmel_pmecc_nand_init_params() 1207 if (!host->has_no_lookup_table) { atmel_pmecc_nand_init_params() 1209 host->pmecc_rom_base = devm_ioremap_resource(&pdev->dev, atmel_pmecc_nand_init_params() 1211 if (IS_ERR(host->pmecc_rom_base)) { atmel_pmecc_nand_init_params() 1212 dev_err(host->dev, "Can not get I/O resource for ROM, will build a lookup table in runtime!\n"); atmel_pmecc_nand_init_params() 1213 host->has_no_lookup_table = true; atmel_pmecc_nand_init_params() 1217 if (host->has_no_lookup_table) { atmel_pmecc_nand_init_params() 1219 galois_table = create_lookup_table(host->dev, sector_size); atmel_pmecc_nand_init_params() 1221 dev_err(host->dev, "Failed to build a lookup table in runtime!\n"); atmel_pmecc_nand_init_params() 1226 host->pmecc_rom_base = (void __iomem *)galois_table; atmel_pmecc_nand_init_params() 1227 host->pmecc_lookup_table_offset = 0; atmel_pmecc_nand_init_params() 1240 dev_err(host->dev, "pmecc sector size is bigger than the page size!\n"); atmel_pmecc_nand_init_params() 1245 host->pmecc_degree = (sector_size == 512) ? atmel_pmecc_nand_init_params() 1247 host->pmecc_cw_len = (1 << host->pmecc_degree) - 1; atmel_pmecc_nand_init_params() 1248 host->pmecc_alpha_to = pmecc_get_alpha_to(host); atmel_pmecc_nand_init_params() 1249 host->pmecc_index_of = host->pmecc_rom_base + atmel_pmecc_nand_init_params() 1250 host->pmecc_lookup_table_offset; atmel_pmecc_nand_init_params() 1259 dev_err(host->dev, "No room for ECC bytes\n"); atmel_pmecc_nand_init_params() 1270 dev_warn(host->dev, atmel_pmecc_nand_init_params() 1279 err_no = pmecc_data_alloc(host); atmel_pmecc_nand_init_params() 1281 dev_err(host->dev, atmel_pmecc_nand_init_params() 1311 struct atmel_nand_host *host = nand_chip->priv; atmel_nand_calculate() local 1315 ecc_value = ecc_readl(host->ecc, PR); atmel_nand_calculate() 1321 ecc_value = ecc_readl(host->ecc, NPR) & ATMEL_ECC_NPARITY; atmel_nand_calculate() 1357 struct atmel_nand_host *host = chip->priv; atmel_nand_read_page() local 1358 if (host->board.need_reset_workaround) atmel_nand_read_page() 1359 ecc_writel(host->ecc, CR, ATMEL_ECC_RST); atmel_nand_read_page() 1415 struct atmel_nand_host *host = nand_chip->priv; atmel_nand_correct() local 1420 ecc_status = ecc_readl(host->ecc, SR); atmel_nand_correct() 1427 ecc_bit = ecc_readl(host->ecc, PR) & ATMEL_ECC_BITADDR; atmel_nand_correct() 1429 ecc_word = ecc_readl(host->ecc, PR) & ATMEL_ECC_WORDADDR; atmel_nand_correct() 1444 dev_dbg(host->dev, "atmel_nand : multiple errors detected." atmel_nand_correct() 1454 dev_dbg(host->dev, "atmel_nand : one bit error on ECC code." atmel_nand_correct() 1459 dev_dbg(host->dev, "atmel_nand : one bit error on data." 
atmel_nand_correct() 1471 dev_dbg(host->dev, "atmel_nand : error corrected\n"); atmel_nand_correct() 1481 struct atmel_nand_host *host = nand_chip->priv; atmel_nand_hwctl() local 1483 if (host->board.need_reset_workaround) atmel_nand_hwctl() 1484 ecc_writel(host->ecc, CR, ATMEL_ECC_RST); atmel_nand_hwctl() 1489 static int atmel_of_init_port(struct atmel_nand_host *host, atmel_of_init_port() argument 1495 struct atmel_nand_data *board = &host->board; atmel_of_init_port() 1498 host->caps = (struct atmel_nand_caps *) atmel_of_init_port() 1499 of_match_device(atmel_nand_dt_ids, host->dev)->data; atmel_of_init_port() 1503 dev_err(host->dev, "invalid addr-offset %u\n", val); atmel_of_init_port() 1511 dev_err(host->dev, "invalid cmd-offset %u\n", val); atmel_of_init_port() 1534 host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc"); atmel_of_init_port() 1537 of_platform_populate(np, NULL, NULL, host->dev); atmel_of_init_port() 1539 if (!(board->ecc_mode == NAND_ECC_HW) || !host->has_pmecc) atmel_of_init_port() 1550 dev_err(host->dev, atmel_of_init_port() 1555 host->pmecc_corr_cap = (u8)val; atmel_of_init_port() 1560 dev_err(host->dev, atmel_of_init_port() 1565 host->pmecc_sector_size = (u16)val; atmel_of_init_port() 1570 dev_err(host->dev, "Cannot get PMECC lookup table offset, will build a lookup table in runtime.\n"); atmel_of_init_port() 1571 host->has_no_lookup_table = true; atmel_of_init_port() 1576 dev_err(host->dev, "Invalid PMECC lookup table offset\n"); atmel_of_init_port() 1579 host->pmecc_lookup_table_offset_512 = offset[0]; atmel_of_init_port() 1580 host->pmecc_lookup_table_offset_1024 = offset[1]; atmel_of_init_port() 1586 struct atmel_nand_host *host) atmel_hw_nand_init_params() 1588 struct mtd_info *mtd = &host->mtd; atmel_hw_nand_init_params() 1589 struct nand_chip *nand_chip = &host->nand_chip; atmel_hw_nand_init_params() 1594 dev_err(host->dev, atmel_hw_nand_init_params() 1600 host->ecc = devm_ioremap_resource(&pdev->dev, regs); atmel_hw_nand_init_params() 1601 if (IS_ERR(host->ecc)) atmel_hw_nand_init_params() 1602 return PTR_ERR(host->ecc); atmel_hw_nand_init_params() 1611 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528); atmel_hw_nand_init_params() 1615 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056); atmel_hw_nand_init_params() 1619 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112); atmel_hw_nand_init_params() 1623 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224); atmel_hw_nand_init_params() 1643 static inline u32 nfc_read_status(struct atmel_nand_host *host) nfc_read_status() argument 1646 u32 nfc_status = nfc_readl(host->nfc->hsmc_regs, SR); nfc_read_status() 1650 dev_err(host->dev, "NFC: Waiting Nand R/B Timeout Error\n"); nfc_read_status() 1652 dev_err(host->dev, "NFC: Access Undefined Area Error\n"); nfc_read_status() 1654 dev_err(host->dev, "NFC: Access memory While NFC is busy\n"); nfc_read_status() 1656 dev_err(host->dev, "NFC: Access memory Size Error\n"); nfc_read_status() 1665 struct atmel_nand_host *host = dev_id; hsmc_interrupt() local 1669 status = nfc_read_status(host); hsmc_interrupt() 1670 mask = nfc_readl(host->nfc->hsmc_regs, IMR); hsmc_interrupt() 1674 complete(&host->nfc->comp_xfer_done); hsmc_interrupt() 1675 nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE); hsmc_interrupt() 1679 complete(&host->nfc->comp_ready); hsmc_interrupt() 1680 nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE); hsmc_interrupt() 1684 complete(&host->nfc->comp_cmd_done); hsmc_interrupt() 1685 nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_CMD_DONE); 
hsmc_interrupt() 1693 static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag) nfc_prepare_interrupt() argument 1696 init_completion(&host->nfc->comp_xfer_done); nfc_prepare_interrupt() 1699 init_completion(&host->nfc->comp_ready); nfc_prepare_interrupt() 1702 init_completion(&host->nfc->comp_cmd_done); nfc_prepare_interrupt() 1705 nfc_writel(host->nfc->hsmc_regs, IER, flag); nfc_prepare_interrupt() 1708 static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag) nfc_wait_interrupt() argument 1714 comp[index++] = &host->nfc->comp_xfer_done; nfc_wait_interrupt() 1717 comp[index++] = &host->nfc->comp_ready; nfc_wait_interrupt() 1720 comp[index++] = &host->nfc->comp_cmd_done; nfc_wait_interrupt() 1723 dev_err(host->dev, "Unknown interrupt flag: 0x%08x\n", flag); nfc_wait_interrupt() 1738 dev_err(host->dev, "Time out to wait for interrupt: 0x%08x\n", flag); nfc_wait_interrupt() 1740 nfc_writel(host->nfc->hsmc_regs, IDR, flag); nfc_wait_interrupt() 1744 static int nfc_send_command(struct atmel_nand_host *host, nfc_send_command() argument 1751 dev_dbg(host->dev, nfc_send_command() 1756 while (nfc_readl(host->nfc->hsmc_regs, SR) & NFC_SR_BUSY) { nfc_send_command() 1758 dev_err(host->dev, nfc_send_command() 1764 nfc_prepare_interrupt(host, flag); nfc_send_command() 1765 nfc_writel(host->nfc->hsmc_regs, CYCLE0, cycle0); nfc_send_command() 1766 nfc_cmd_addr1234_writel(cmd, addr, host->nfc->base_cmd_regs); nfc_send_command() 1767 return nfc_wait_interrupt(host, flag); nfc_send_command() 1774 struct atmel_nand_host *host = nand_chip->priv; nfc_device_ready() local 1776 status = nfc_read_status(host); nfc_device_ready() 1777 mask = nfc_readl(host->nfc->hsmc_regs, IMR); nfc_device_ready() 1781 dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n", nfc_device_ready() 1790 struct atmel_nand_host *host = nand_chip->priv; nfc_select_chip() local 1793 nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_DISABLE); nfc_select_chip() 1795 nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_ENABLE); nfc_select_chip() 1842 struct atmel_nand_host *host = chip->priv; nfc_nand_command() local 1858 host->nfc->data_in_sram = NULL; nfc_nand_command() 1860 dev_dbg(host->dev, "%s: cmd = 0x%02x, col = 0x%08x, page = 0x%08x\n", nfc_nand_command() 1866 nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0); nfc_nand_command() 1873 dev_err(host->dev, nfc_nand_command() 1900 if (host->nfc->use_nfc_sram) { nfc_nand_command() 1907 if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc) nfc_nand_command() 1908 pmecc_enable(host, NAND_ECC_READ); nfc_nand_command() 1919 if (host->nfc->will_write_sram && command == NAND_CMD_SEQIN) nfc_nand_command() 1931 nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0); nfc_nand_command() 1951 host->nfc->data_in_sram = host->nfc->sram_bank0 + nfc_nand_command() 1952 nfc_get_sram_off(host); nfc_nand_command() 1957 nfc_prepare_interrupt(host, NFC_SR_RB_EDGE); nfc_nand_command() 1958 nfc_wait_interrupt(host, NFC_SR_RB_EDGE); nfc_nand_command() 1968 struct atmel_nand_host *host = chip->priv; nfc_sram_write_page() local 1969 void *sram = host->nfc->sram_bank0 + nfc_get_sram_off(host); nfc_sram_write_page() 1985 cfg = nfc_readl(host->nfc->hsmc_regs, CFG); nfc_sram_write_page() 1989 nfc_writel(host->nfc->hsmc_regs, CFG, cfg | NFC_CFG_WSPARE); nfc_sram_write_page() 1991 nfc_writel(host->nfc->hsmc_regs, CFG, cfg & ~NFC_CFG_WSPARE); nfc_sram_write_page() 1994 if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc) nfc_sram_write_page() 2000 pmecc_enable(host, NAND_ECC_WRITE); 
nfc_sram_write_page() 2002 host->nfc->will_write_sram = true; nfc_sram_write_page() 2004 host->nfc->will_write_sram = false; nfc_sram_write_page() 2028 struct atmel_nand_host *host = chip->priv; nfc_sram_init() local 2052 dev_err(host->dev, "Unsupported page size for NFC.\n"); nfc_sram_init() 2065 nfc_writel(host->nfc->hsmc_regs, CFG, cfg_nfc); nfc_sram_init() 2067 host->nfc->will_write_sram = false; nfc_sram_init() 2068 nfc_set_sram_bank(host, 0); nfc_sram_init() 2071 if (host->nfc->write_by_sram) { nfc_sram_init() 2072 if ((chip->ecc.mode == NAND_ECC_HW && host->has_pmecc) || nfc_sram_init() 2076 host->nfc->write_by_sram = false; nfc_sram_init() 2079 dev_info(host->dev, "Using NFC Sram read %s\n", nfc_sram_init() 2080 host->nfc->write_by_sram ? "and write" : ""); nfc_sram_init() 2090 struct atmel_nand_host *host; atmel_nand_probe() local 2098 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); atmel_nand_probe() 2099 if (!host) atmel_nand_probe() 2107 host->io_base = devm_ioremap_resource(&pdev->dev, mem); atmel_nand_probe() 2108 if (IS_ERR(host->io_base)) { atmel_nand_probe() 2109 res = PTR_ERR(host->io_base); atmel_nand_probe() 2112 host->io_phys = (dma_addr_t)mem->start; atmel_nand_probe() 2114 mtd = &host->mtd; atmel_nand_probe() 2115 nand_chip = &host->nand_chip; atmel_nand_probe() 2116 host->dev = &pdev->dev; atmel_nand_probe() 2119 res = atmel_of_init_port(host, pdev->dev.of_node); atmel_nand_probe() 2123 memcpy(&host->board, dev_get_platdata(&pdev->dev), atmel_nand_probe() 2127 nand_chip->priv = host; /* link the private data structures */ atmel_nand_probe() 2132 nand_chip->IO_ADDR_R = host->io_base; atmel_nand_probe() 2133 nand_chip->IO_ADDR_W = host->io_base; atmel_nand_probe() 2137 host->nfc = &nand_nfc; atmel_nand_probe() 2146 dev_err(host->dev, "Cannot get HSMC irq!\n"); atmel_nand_probe() 2152 0, "hsmc", host); atmel_nand_probe() 2166 nand_chip->ecc.mode = host->board.ecc_mode; atmel_nand_probe() 2169 if (host->board.bus_width_16) /* 16-bit bus width */ atmel_nand_probe() 2175 platform_set_drvdata(pdev, host); atmel_nand_probe() 2176 atmel_nand_enable(host); atmel_nand_probe() 2178 if (gpio_is_valid(host->board.det_pin)) { atmel_nand_probe() 2180 host->board.det_pin, "nand_det"); atmel_nand_probe() 2184 host->board.det_pin); atmel_nand_probe() 2188 res = gpio_direction_input(host->board.det_pin); atmel_nand_probe() 2192 host->board.det_pin); atmel_nand_probe() 2196 if (gpio_get_value(host->board.det_pin)) { atmel_nand_probe() 2203 if (host->board.on_flash_bbt || on_flash_bbt) { atmel_nand_probe() 2208 if (!host->board.has_dma) atmel_nand_probe() 2216 host->dma_chan = dma_request_channel(mask, NULL, NULL); atmel_nand_probe() 2217 if (!host->dma_chan) { atmel_nand_probe() 2218 dev_err(host->dev, "Failed to request DMA channel\n"); atmel_nand_probe() 2223 dev_info(host->dev, "Using %s for DMA transfers.\n", atmel_nand_probe() 2224 dma_chan_name(host->dma_chan)); atmel_nand_probe() 2226 dev_info(host->dev, "No DMA support for NAND access.\n"); atmel_nand_probe() 2235 if (host->has_pmecc) atmel_nand_probe() 2236 res = atmel_pmecc_nand_init_params(pdev, host); atmel_nand_probe() 2238 res = atmel_hw_nand_init_params(pdev, host); atmel_nand_probe() 2245 if (host->nfc && host->nfc->use_nfc_sram) { atmel_nand_probe() 2248 host->nfc->use_nfc_sram = false; atmel_nand_probe() 2249 dev_err(host->dev, "Disable use nfc sram for data transfer.\n"); atmel_nand_probe() 2262 host->board.parts, host->board.num_parts); atmel_nand_probe() 2267 if (host->has_pmecc && 
host->nand_chip.ecc.mode == NAND_ECC_HW) atmel_nand_probe() 2268 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); atmel_nand_probe() 2272 atmel_nand_disable(host); atmel_nand_probe() 2273 if (host->dma_chan) atmel_nand_probe() 2274 dma_release_channel(host->dma_chan); atmel_nand_probe() 2284 struct atmel_nand_host *host = platform_get_drvdata(pdev); atmel_nand_remove() local 2285 struct mtd_info *mtd = &host->mtd; atmel_nand_remove() 2289 atmel_nand_disable(host); atmel_nand_remove() 2291 if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) { atmel_nand_remove() 2292 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); atmel_nand_remove() 2293 pmerrloc_writel(host->pmerrloc_base, ELDIS, atmel_nand_remove() 2297 if (host->dma_chan) atmel_nand_remove() 2298 dma_release_channel(host->dma_chan); atmel_nand_remove() 1158 atmel_pmecc_nand_init_params(struct platform_device *pdev, struct atmel_nand_host *host) atmel_pmecc_nand_init_params() argument 1585 atmel_hw_nand_init_params(struct platform_device *pdev, struct atmel_nand_host *host) atmel_hw_nand_init_params() argument
|
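The pmecc_correct_data() path listed above turns each error location reported by the PMERRLOC block into a byte offset and a bit offset inside the sector (the driver reads the ELx register, subtracts one, then takes /8 and %8) and flips that bit either in the data area or in the ECC/OOB bytes. The stand-alone sketch below reproduces only that arithmetic; the name correct_sector(), the zero-based bit indices and the 13-byte ECC buffer are assumptions for illustration, not the driver's interface.

#include <stdint.h>
#include <stdio.h>

/* Minimal user-space sketch of the PMECC-style correction step: each
 * reported error location is treated as a zero-based bit index within
 * (sector data + ECC bytes). Indices inside the sector flip a data bit,
 * anything beyond falls into the ECC/OOB area. */
static void correct_sector(uint8_t *data, uint8_t *ecc,
			   unsigned int sector_size,
			   const uint32_t *err_pos, int nerr)
{
	for (int i = 0; i < nerr; i++) {
		uint32_t bit  = err_pos[i];		/* zero-based bit index (assumed) */
		uint32_t byte = bit / 8;
		uint32_t off  = bit % 8;

		if (byte < sector_size)
			data[byte] ^= 1u << off;	/* bit flip in the data area */
		else
			ecc[byte - sector_size] ^= 1u << off;	/* bit flip in ECC/OOB */
	}
}

int main(void)
{
	uint8_t data[512] = { 0 }, ecc[13] = { 0 };
	uint32_t errs[] = { 10, 512 * 8 };	/* data byte 1 bit 2, first ECC byte bit 0 */

	correct_sector(data, ecc, sizeof(data), errs, 2);
	printf("data[1]=0x%02x ecc[0]=0x%02x\n", data[1], ecc[0]);
	return 0;
}

Fed with the positions read back from the error-location registers (after the driver's -1 adjustment), this reproduces the byte_pos/bit_pos values that the dev_info() messages in pmecc_correct_data() print.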
H A D | lpc32xx_mlc.c | 228 static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host) lpc32xx_nand_setup() argument 233 writel(MLCCMD_RESET, MLC_CMD(host->io_base)); lpc32xx_nand_setup() 237 clkrate = clk_get_rate(host->clk); lpc32xx_nand_setup() 243 writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base)); lpc32xx_nand_setup() 247 writel(tmp, MLC_ICR(host->io_base)); lpc32xx_nand_setup() 251 writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base)); lpc32xx_nand_setup() 255 tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1); lpc32xx_nand_setup() 256 tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1); lpc32xx_nand_setup() 257 tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1); lpc32xx_nand_setup() 258 tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1); lpc32xx_nand_setup() 259 tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low); lpc32xx_nand_setup() 260 tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1); lpc32xx_nand_setup() 261 tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low); lpc32xx_nand_setup() 262 writel(tmp, MLC_TIME_REG(host->io_base)); lpc32xx_nand_setup() 266 MLC_IRQ_MR(host->io_base)); lpc32xx_nand_setup() 269 writel(MLCCEH_NORMAL, MLC_CEH(host->io_base)); lpc32xx_nand_setup() 279 struct lpc32xx_nand_host *host = nand_chip->priv; lpc32xx_nand_cmd_ctrl() local 283 writel(cmd, MLC_CMD(host->io_base)); lpc32xx_nand_cmd_ctrl() 285 writel(cmd, MLC_ADDR(host->io_base)); lpc32xx_nand_cmd_ctrl() 295 struct lpc32xx_nand_host *host = nand_chip->priv; lpc32xx_nand_device_ready() local 297 if ((readb(MLC_ISR(host->io_base)) & lpc32xx_nand_device_ready() 305 static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host) lpc3xxx_nand_irq() argument 310 sr = readb(MLC_IRQ_SR(host->io_base)); lpc3xxx_nand_irq() 312 complete(&host->comp_nand); lpc3xxx_nand_irq() 314 complete(&host->comp_controller); lpc3xxx_nand_irq() 321 struct lpc32xx_nand_host *host = chip->priv; lpc32xx_waitfunc_nand() local 323 if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY) lpc32xx_waitfunc_nand() 326 wait_for_completion(&host->comp_nand); lpc32xx_waitfunc_nand() 328 while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) { lpc32xx_waitfunc_nand() 341 struct lpc32xx_nand_host *host = chip->priv; lpc32xx_waitfunc_controller() local 343 if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY) lpc32xx_waitfunc_controller() 346 wait_for_completion(&host->comp_controller); lpc32xx_waitfunc_controller() 348 while (!(readb(MLC_ISR(host->io_base)) & lpc32xx_waitfunc_controller() 369 static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host) lpc32xx_wp_enable() argument 371 if (gpio_is_valid(host->ncfg->wp_gpio)) lpc32xx_wp_enable() 372 gpio_set_value(host->ncfg->wp_gpio, 0); lpc32xx_wp_enable() 378 static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host) lpc32xx_wp_disable() argument 380 if (gpio_is_valid(host->ncfg->wp_gpio)) lpc32xx_wp_disable() 381 gpio_set_value(host->ncfg->wp_gpio, 1); lpc32xx_wp_disable() 393 struct lpc32xx_nand_host *host = chip->priv; lpc32xx_xmit_dma() local 398 sg_init_one(&host->sgl, mem, len); lpc32xx_xmit_dma() 400 res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1, lpc32xx_xmit_dma() 406 desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir, lpc32xx_xmit_dma() 413 init_completion(&host->comp_dma); lpc32xx_xmit_dma() 415 desc->callback_param = &host->comp_dma; lpc32xx_xmit_dma() 418 dma_async_issue_pending(host->dma_chan); lpc32xx_xmit_dma() 420 
wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000)); lpc32xx_xmit_dma() 422 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1, lpc32xx_xmit_dma() 426 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1, lpc32xx_xmit_dma() 434 struct lpc32xx_nand_host *host = chip->priv; lpc32xx_read_page() local 446 dma_buf = host->dma_buf; lpc32xx_read_page() 454 for (i = 0; i < host->mlcsubpages; i++) { lpc32xx_read_page() 456 writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base)); lpc32xx_read_page() 462 mlc_isr = readl(MLC_ISR(host->io_base)); lpc32xx_read_page() 479 readl(MLC_BUFF(host->io_base)); lpc32xx_read_page() 485 readl(MLC_BUFF(host->io_base)); lpc32xx_read_page() 500 struct lpc32xx_nand_host *host = chip->priv; lpc32xx_write_page_lowlevel() local 507 dma_buf = host->dma_buf; lpc32xx_write_page_lowlevel() 511 for (i = 0; i < host->mlcsubpages; i++) { lpc32xx_write_page_lowlevel() 513 writeb(0x00, MLC_ECC_ENC_REG(host->io_base)); lpc32xx_write_page_lowlevel() 524 MLC_BUFF(host->io_base)); lpc32xx_write_page_lowlevel() 528 writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base)); lpc32xx_write_page_lowlevel() 530 writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base)); lpc32xx_write_page_lowlevel() 534 writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base)); lpc32xx_write_page_lowlevel() 545 struct lpc32xx_nand_host *host = chip->priv; lpc32xx_read_oob() local 548 lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page); lpc32xx_read_oob() 566 static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host) lpc32xx_dma_setup() argument 568 struct mtd_info *mtd = &host->mtd; lpc32xx_dma_setup() 571 if (!host->pdata || !host->pdata->dma_filter) { lpc32xx_dma_setup() 578 host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter, lpc32xx_dma_setup() 580 if (!host->dma_chan) { lpc32xx_dma_setup() 590 host->dma_slave_config.direction = DMA_DEV_TO_MEM; lpc32xx_dma_setup() 591 host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; lpc32xx_dma_setup() 592 host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; lpc32xx_dma_setup() 593 host->dma_slave_config.src_maxburst = 128; lpc32xx_dma_setup() 594 host->dma_slave_config.dst_maxburst = 128; lpc32xx_dma_setup() 596 host->dma_slave_config.device_fc = false; lpc32xx_dma_setup() 597 host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy); lpc32xx_dma_setup() 598 host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy); lpc32xx_dma_setup() 599 if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) { lpc32xx_dma_setup() 606 dma_release_channel(host->dma_chan); lpc32xx_dma_setup() 644 struct lpc32xx_nand_host *host; lpc32xx_nand_probe() local 652 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); lpc32xx_nand_probe() 653 if (!host) lpc32xx_nand_probe() 657 host->io_base = devm_ioremap_resource(&pdev->dev, rc); lpc32xx_nand_probe() 658 if (IS_ERR(host->io_base)) lpc32xx_nand_probe() 659 return PTR_ERR(host->io_base); lpc32xx_nand_probe() 661 host->io_base_phy = rc->start; lpc32xx_nand_probe() 663 mtd = &host->mtd; lpc32xx_nand_probe() 664 nand_chip = &host->nand_chip; lpc32xx_nand_probe() 666 host->ncfg = lpc32xx_parse_dt(&pdev->dev); lpc32xx_nand_probe() 667 if (!host->ncfg) { lpc32xx_nand_probe() 672 if (host->ncfg->wp_gpio == -EPROBE_DEFER) lpc32xx_nand_probe() 674 if (gpio_is_valid(host->ncfg->wp_gpio) && lpc32xx_nand_probe() 675 gpio_request(host->ncfg->wp_gpio, "NAND WP")) { lpc32xx_nand_probe() 679 lpc32xx_wp_disable(host); lpc32xx_nand_probe() 681 host->pdata = 
dev_get_platdata(&pdev->dev); lpc32xx_nand_probe() 683 nand_chip->priv = host; /* link the private data structures */ lpc32xx_nand_probe() 689 host->clk = clk_get(&pdev->dev, NULL); lpc32xx_nand_probe() 690 if (IS_ERR(host->clk)) { lpc32xx_nand_probe() 695 clk_enable(host->clk); lpc32xx_nand_probe() 700 nand_chip->IO_ADDR_R = MLC_DATA(host->io_base); lpc32xx_nand_probe() 701 nand_chip->IO_ADDR_W = MLC_DATA(host->io_base); lpc32xx_nand_probe() 704 lpc32xx_nand_setup(host); lpc32xx_nand_probe() 706 platform_set_drvdata(pdev, host); lpc32xx_nand_probe() 725 res = lpc32xx_dma_setup(host); lpc32xx_nand_probe() 741 host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL); lpc32xx_nand_probe() 742 if (!host->dma_buf) { lpc32xx_nand_probe() 747 host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL); lpc32xx_nand_probe() 748 if (!host->dummy_buf) { lpc32xx_nand_probe() 756 host->mlcsubpages = mtd->writesize / 512; lpc32xx_nand_probe() 759 readb(MLC_IRQ_SR(host->io_base)); lpc32xx_nand_probe() 761 init_completion(&host->comp_nand); lpc32xx_nand_probe() 762 init_completion(&host->comp_controller); lpc32xx_nand_probe() 764 host->irq = platform_get_irq(pdev, 0); lpc32xx_nand_probe() 765 if ((host->irq < 0) || (host->irq >= NR_IRQS)) { lpc32xx_nand_probe() 771 if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq, lpc32xx_nand_probe() 772 IRQF_TRIGGER_HIGH, DRV_NAME, host)) { lpc32xx_nand_probe() 790 res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts, lpc32xx_nand_probe() 791 host->ncfg->num_parts); lpc32xx_nand_probe() 798 free_irq(host->irq, host); lpc32xx_nand_probe() 801 dma_release_channel(host->dma_chan); lpc32xx_nand_probe() 803 clk_disable(host->clk); lpc32xx_nand_probe() 804 clk_put(host->clk); lpc32xx_nand_probe() 806 lpc32xx_wp_enable(host); lpc32xx_nand_probe() 807 gpio_free(host->ncfg->wp_gpio); lpc32xx_nand_probe() 817 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); lpc32xx_nand_remove() local 818 struct mtd_info *mtd = &host->mtd; lpc32xx_nand_remove() 821 free_irq(host->irq, host); lpc32xx_nand_remove() 823 dma_release_channel(host->dma_chan); lpc32xx_nand_remove() 825 clk_disable(host->clk); lpc32xx_nand_remove() 826 clk_put(host->clk); lpc32xx_nand_remove() 828 lpc32xx_wp_enable(host); lpc32xx_nand_remove() 829 gpio_free(host->ncfg->wp_gpio); lpc32xx_nand_remove() 837 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); lpc32xx_nand_resume() local 840 clk_enable(host->clk); lpc32xx_nand_resume() 843 lpc32xx_nand_setup(host); lpc32xx_nand_resume() 846 lpc32xx_wp_disable(host); lpc32xx_nand_resume() 853 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); lpc32xx_nand_suspend() local 856 lpc32xx_wp_enable(host); lpc32xx_nand_suspend() 859 clk_disable(host->clk); lpc32xx_nand_suspend()
|
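lpc32xx_nand_setup() above programs MLC_TIME_REG by dividing the controller clock rate by each timing parameter from the board configuration and, for most fields, adding one extra cycle. The snippet below is a minimal sketch of that rounding pattern only; the 104 MHz clock, the example divisors and the saturation to a 4-bit field are assumptions for illustration, not values taken from the driver.

#include <stdio.h>

/* Convert a controller clock rate and a per-phase limit into a cycle count,
 * rounding up by one cycle as the MLC setup code does for most fields. */
static unsigned int cycles_for(unsigned long clkrate_hz, unsigned long max_rate_hz)
{
	unsigned int cycles = clkrate_hz / max_rate_hz + 1;	/* one cycle of margin */

	return cycles > 15 ? 15 : cycles;	/* saturate to an assumed 4-bit field */
}

int main(void)
{
	unsigned long clkrate = 104000000;	/* assumed 104 MHz controller clock */

	printf("wr_high cycles: %u\n", cycles_for(clkrate, 40000000));
	printf("rd_high cycles: %u\n", cycles_for(clkrate, 30000000));
	return 0;
}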
H A D | mxc_nand.c | 45 #define NFC_V1_V2_BUF_SIZE (host->regs + 0x00) 46 #define NFC_V1_V2_BUF_ADDR (host->regs + 0x04) 47 #define NFC_V1_V2_FLASH_ADDR (host->regs + 0x06) 48 #define NFC_V1_V2_FLASH_CMD (host->regs + 0x08) 49 #define NFC_V1_V2_CONFIG (host->regs + 0x0a) 50 #define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c) 51 #define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e) 52 #define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10) 53 #define NFC_V1_V2_WRPROT (host->regs + 0x12) 54 #define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14) 55 #define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16) 56 #define NFC_V21_UNLOCKSTART_BLKADDR0 (host->regs + 0x20) 57 #define NFC_V21_UNLOCKSTART_BLKADDR1 (host->regs + 0x24) 58 #define NFC_V21_UNLOCKSTART_BLKADDR2 (host->regs + 0x28) 59 #define NFC_V21_UNLOCKSTART_BLKADDR3 (host->regs + 0x2c) 60 #define NFC_V21_UNLOCKEND_BLKADDR0 (host->regs + 0x22) 61 #define NFC_V21_UNLOCKEND_BLKADDR1 (host->regs + 0x26) 62 #define NFC_V21_UNLOCKEND_BLKADDR2 (host->regs + 0x2a) 63 #define NFC_V21_UNLOCKEND_BLKADDR3 (host->regs + 0x2e) 64 #define NFC_V1_V2_NF_WRPRST (host->regs + 0x18) 65 #define NFC_V1_V2_CONFIG1 (host->regs + 0x1a) 66 #define NFC_V1_V2_CONFIG2 (host->regs + 0x1c) 92 #define NFC_V3_FLASH_CMD (host->regs_axi + 0x00) 93 #define NFC_V3_FLASH_ADDR0 (host->regs_axi + 0x04) 95 #define NFC_V3_CONFIG1 (host->regs_axi + 0x34) 99 #define NFC_V3_ECC_STATUS_RESULT (host->regs_axi + 0x38) 101 #define NFC_V3_LAUNCH (host->regs_axi + 0x40) 103 #define NFC_V3_WRPROT (host->regs_ip + 0x0) 109 #define NFC_V3_WRPROT_UNLOCK_BLK_ADD0 (host->regs_ip + 0x04) 111 #define NFC_V3_CONFIG2 (host->regs_ip + 0x24) 126 #define NFC_V3_CONFIG3 (host->regs_ip + 0x28) 134 #define NFC_V3_IPC (host->regs_ip + 0x2C) 138 #define NFC_V3_DELAY_LINE (host->regs_ip + 0x34) 289 static int check_int_v3(struct mxc_nand_host *host) check_int_v3() argument 303 static int check_int_v1_v2(struct mxc_nand_host *host) check_int_v1_v2() argument 311 if (!host->devtype_data->irqpending_quirk) check_int_v1_v2() 317 static void irq_control_v1_v2(struct mxc_nand_host *host, int activate) irq_control_v1_v2() argument 331 static void irq_control_v3(struct mxc_nand_host *host, int activate) irq_control_v3() argument 345 static void irq_control(struct mxc_nand_host *host, int activate) irq_control() argument 347 if (host->devtype_data->irqpending_quirk) { irq_control() 349 enable_irq(host->irq); irq_control() 351 disable_irq_nosync(host->irq); irq_control() 353 host->devtype_data->irq_control(host, activate); irq_control() 357 static u32 get_ecc_status_v1(struct mxc_nand_host *host) get_ecc_status_v1() argument 362 static u32 get_ecc_status_v2(struct mxc_nand_host *host) get_ecc_status_v2() argument 367 static u32 get_ecc_status_v3(struct mxc_nand_host *host) get_ecc_status_v3() argument 374 struct mxc_nand_host *host = dev_id; mxc_nfc_irq() local 376 if (!host->devtype_data->check_int(host)) mxc_nfc_irq() 379 irq_control(host, 0); mxc_nfc_irq() 381 complete(&host->op_completion); mxc_nfc_irq() 389 static int wait_op_done(struct mxc_nand_host *host, int useirq) wait_op_done() argument 397 if (host->devtype_data->check_int(host)) wait_op_done() 403 reinit_completion(&host->op_completion); wait_op_done() 405 irq_control(host, 1); wait_op_done() 407 timeout = wait_for_completion_timeout(&host->op_completion, HZ); wait_op_done() 408 if (!timeout && !host->devtype_data->check_int(host)) { wait_op_done() 409 dev_dbg(host->dev, "timeout waiting for irq\n"); wait_op_done() 419 done = 
host->devtype_data->check_int(host); wait_op_done() 426 dev_dbg(host->dev, "timeout polling for completion\n"); wait_op_done() 436 static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq) send_cmd_v3() argument 445 wait_op_done(host, useirq); send_cmd_v3() 450 static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) send_cmd_v1_v2() argument 452 pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq); send_cmd_v1_v2() 457 if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) { send_cmd_v1_v2() 471 wait_op_done(host, useirq); send_cmd_v1_v2() 475 static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast) send_addr_v3() argument 483 wait_op_done(host, 0); send_addr_v3() 489 static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast) send_addr_v1_v2() argument 491 pr_debug("send_addr(host, 0x%x %d)\n", addr, islast); send_addr_v1_v2() 497 wait_op_done(host, islast); send_addr_v1_v2() 503 struct mxc_nand_host *host = nand_chip->priv; send_page_v3() local 513 wait_op_done(host, false); send_page_v3() 519 struct mxc_nand_host *host = nand_chip->priv; send_page_v2() local 522 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR); send_page_v2() 527 wait_op_done(host, true); send_page_v2() 533 struct mxc_nand_host *host = nand_chip->priv; send_page_v1() local 544 writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR); send_page_v1() 549 wait_op_done(host, true); send_page_v1() 553 static void send_read_id_v3(struct mxc_nand_host *host) send_read_id_v3() argument 558 wait_op_done(host, true); send_read_id_v3() 560 memcpy32_fromio(host->data_buf, host->main_area0, 16); send_read_id_v3() 564 static void send_read_id_v1_v2(struct mxc_nand_host *host) send_read_id_v1_v2() argument 567 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR); send_read_id_v1_v2() 572 wait_op_done(host, true); send_read_id_v1_v2() 574 memcpy32_fromio(host->data_buf, host->main_area0, 16); send_read_id_v1_v2() 577 static uint16_t get_dev_status_v3(struct mxc_nand_host *host) get_dev_status_v3() argument 580 wait_op_done(host, true); get_dev_status_v3() 587 static uint16_t get_dev_status_v1_v2(struct mxc_nand_host *host) get_dev_status_v1_v2() argument 589 void __iomem *main_buf = host->main_area0; get_dev_status_v1_v2() 593 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR); get_dev_status_v1_v2() 603 wait_op_done(host, true); get_dev_status_v1_v2() 634 struct mxc_nand_host *host = nand_chip->priv; mxc_nand_correct_data_v1() local 641 uint16_t ecc_status = get_ecc_status_v1(host); mxc_nand_correct_data_v1() 655 struct mxc_nand_host *host = nand_chip->priv; mxc_nand_correct_data_v2_v3() local 661 ecc_bit_mask = (host->eccsize == 4) ? 0x7 : 0xf; mxc_nand_correct_data_v2_v3() 662 err_limit = (host->eccsize == 4) ? 
0x4 : 0x8; mxc_nand_correct_data_v2_v3() 666 ecc_stat = host->devtype_data->get_ecc_status(host); mxc_nand_correct_data_v2_v3() 693 struct mxc_nand_host *host = nand_chip->priv; mxc_nand_read_byte() local 697 if (host->status_request) mxc_nand_read_byte() 698 return host->devtype_data->get_dev_status(host) & 0xFF; mxc_nand_read_byte() 702 ret = *(uint16_t *)(host->data_buf + host->buf_start); mxc_nand_read_byte() 704 host->buf_start += 2; mxc_nand_read_byte() 706 ret = *(uint8_t *)(host->data_buf + host->buf_start); mxc_nand_read_byte() 707 host->buf_start++; mxc_nand_read_byte() 710 pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start); mxc_nand_read_byte() 717 struct mxc_nand_host *host = nand_chip->priv; mxc_nand_read_word() local 720 ret = *(uint16_t *)(host->data_buf + host->buf_start); mxc_nand_read_word() 721 host->buf_start += 2; mxc_nand_read_word() 733 struct mxc_nand_host *host = nand_chip->priv; mxc_nand_write_buf() local 734 u16 col = host->buf_start; mxc_nand_write_buf() 739 memcpy(host->data_buf + col, buf, n); mxc_nand_write_buf() 741 host->buf_start += n; mxc_nand_write_buf() 751 struct mxc_nand_host *host = nand_chip->priv; mxc_nand_read_buf() local 752 u16 col = host->buf_start; mxc_nand_read_buf() 757 memcpy(buf, host->data_buf + col, n); mxc_nand_read_buf() 759 host->buf_start += n; mxc_nand_read_buf() 767 struct mxc_nand_host *host = nand_chip->priv; mxc_nand_select_chip_v1_v3() local 771 if (host->clk_act) { mxc_nand_select_chip_v1_v3() 772 clk_disable_unprepare(host->clk); mxc_nand_select_chip_v1_v3() 773 host->clk_act = 0; mxc_nand_select_chip_v1_v3() 778 if (!host->clk_act) { mxc_nand_select_chip_v1_v3() 780 clk_prepare_enable(host->clk); mxc_nand_select_chip_v1_v3() 781 host->clk_act = 1; mxc_nand_select_chip_v1_v3() 788 struct mxc_nand_host *host = nand_chip->priv; mxc_nand_select_chip_v2() local 792 if (host->clk_act) { mxc_nand_select_chip_v2() 793 clk_disable_unprepare(host->clk); mxc_nand_select_chip_v2() 794 host->clk_act = 0; mxc_nand_select_chip_v2() 799 if (!host->clk_act) { mxc_nand_select_chip_v2() 801 clk_prepare_enable(host->clk); mxc_nand_select_chip_v2() 802 host->clk_act = 1; mxc_nand_select_chip_v2() 805 host->active_cs = chip; mxc_nand_select_chip_v2() 806 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR); mxc_nand_select_chip_v2() 815 struct mxc_nand_host *host = this->priv; copy_spare() local 818 u8 *d = host->data_buf + mtd->writesize; copy_spare() 819 u8 __iomem *s = host->spare0; copy_spare() 820 u16 t = host->devtype_data->spare_len; copy_spare() 848 struct mxc_nand_host *host = nand_chip->priv; mxc_do_addr_cycle() local 852 host->devtype_data->send_addr(host, column & 0xff, mxc_do_addr_cycle() 856 host->devtype_data->send_addr(host, mxc_do_addr_cycle() 864 host->devtype_data->send_addr(host, (page_addr & 0xff), false); mxc_do_addr_cycle() 869 host->devtype_data->send_addr(host, mxc_do_addr_cycle() 872 host->devtype_data->send_addr(host, mxc_do_addr_cycle() 877 host->devtype_data->send_addr(host, mxc_do_addr_cycle() 883 host->devtype_data->send_addr(host, mxc_do_addr_cycle() 886 host->devtype_data->send_addr(host, mxc_do_addr_cycle() 891 host->devtype_data->send_addr(host, mxc_do_addr_cycle() 917 struct mxc_nand_host *host = nand_chip->priv; preset_v1() local 923 if (!host->devtype_data->irqpending_quirk) preset_v1() 926 host->eccsize = 1; preset_v1() 945 struct mxc_nand_host *host = nand_chip->priv; preset_v2() local 950 if (!host->devtype_data->irqpending_quirk) preset_v2() 959 host->eccsize = get_eccsize(mtd); 
preset_v2() 960 if (host->eccsize == 4) preset_v2() 965 host->eccsize = 1; preset_v2() 991 struct mxc_nand_host *host = chip->priv; preset_v3() local 1035 host->devtype_data->ppb_shift); preset_v3() 1036 host->eccsize = get_eccsize(mtd); preset_v3() 1037 if (host->eccsize == 8) preset_v3() 1063 struct mxc_nand_host *host = nand_chip->priv; mxc_nand_command() local 1069 host->status_request = false; mxc_nand_command() 1074 host->devtype_data->preset(mtd); mxc_nand_command() 1075 host->devtype_data->send_cmd(host, command, false); mxc_nand_command() 1079 host->buf_start = 0; mxc_nand_command() 1080 host->status_request = true; mxc_nand_command() 1082 host->devtype_data->send_cmd(host, command, true); mxc_nand_command() 1092 host->buf_start = column; mxc_nand_command() 1094 host->buf_start = column + mtd->writesize; mxc_nand_command() 1098 host->devtype_data->send_cmd(host, command, false); mxc_nand_command() 1105 host->devtype_data->send_cmd(host, mxc_nand_command() 1108 host->devtype_data->send_page(mtd, NFC_OUTPUT); mxc_nand_command() 1110 memcpy32_fromio(host->data_buf, host->main_area0, mxc_nand_command() 1120 host->buf_start = column; mxc_nand_command() 1122 host->devtype_data->send_cmd(host, command, false); mxc_nand_command() 1130 memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize); mxc_nand_command() 1132 host->devtype_data->send_page(mtd, NFC_INPUT); mxc_nand_command() 1133 host->devtype_data->send_cmd(host, command, true); mxc_nand_command() 1141 host->devtype_data->send_cmd(host, command, true); mxc_nand_command() 1143 host->devtype_data->send_read_id(host); mxc_nand_command() 1144 host->buf_start = 0; mxc_nand_command() 1149 host->devtype_data->send_cmd(host, command, false); mxc_nand_command() 1157 host->devtype_data->send_cmd(host, command, false); mxc_nand_command() 1159 host->devtype_data->send_page(mtd, NFC_OUTPUT); mxc_nand_command() 1160 memcpy32_fromio(host->data_buf, host->main_area0, 512); mxc_nand_command() 1161 host->buf_start = 0; mxc_nand_command() 1328 static inline int is_imx21_nfc(struct mxc_nand_host *host) is_imx21_nfc() argument 1330 return host->devtype_data == &imx21_nand_devtype_data; is_imx21_nfc() 1333 static inline int is_imx27_nfc(struct mxc_nand_host *host) is_imx27_nfc() argument 1335 return host->devtype_data == &imx27_nand_devtype_data; is_imx27_nfc() 1338 static inline int is_imx25_nfc(struct mxc_nand_host *host) is_imx25_nfc() argument 1340 return host->devtype_data == &imx25_nand_devtype_data; is_imx25_nfc() 1343 static inline int is_imx51_nfc(struct mxc_nand_host *host) is_imx51_nfc() argument 1345 return host->devtype_data == &imx51_nand_devtype_data; is_imx51_nfc() 1348 static inline int is_imx53_nfc(struct mxc_nand_host *host) is_imx53_nfc() argument 1350 return host->devtype_data == &imx53_nand_devtype_data; is_imx53_nfc() 1396 static int __init mxcnd_probe_dt(struct mxc_nand_host *host) mxcnd_probe_dt() argument 1398 struct device_node *np = host->dev->of_node; mxcnd_probe_dt() 1399 struct mxc_nand_platform_data *pdata = &host->pdata; mxcnd_probe_dt() 1401 of_match_device(mxcnd_dt_ids, host->dev); mxcnd_probe_dt() 1418 host->devtype_data = of_id->data; mxcnd_probe_dt() 1423 static int __init mxcnd_probe_dt(struct mxc_nand_host *host) mxcnd_probe_dt() argument 1433 struct mxc_nand_host *host; mxcnd_probe() local 1438 host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host), mxcnd_probe() 1440 if (!host) mxcnd_probe() 1444 host->data_buf = devm_kzalloc(&pdev->dev, PAGE_SIZE, GFP_KERNEL); mxcnd_probe() 1445 if (!host->data_buf) 
mxcnd_probe() 1448 host->dev = &pdev->dev; mxcnd_probe() 1450 this = &host->nand; mxcnd_probe() 1451 mtd = &host->mtd; mxcnd_probe() 1460 this->priv = host; mxcnd_probe() 1468 host->clk = devm_clk_get(&pdev->dev, NULL); mxcnd_probe() 1469 if (IS_ERR(host->clk)) mxcnd_probe() 1470 return PTR_ERR(host->clk); mxcnd_probe() 1472 err = mxcnd_probe_dt(host); mxcnd_probe() 1477 host->pdata = *pdata; mxcnd_probe() 1478 host->devtype_data = (struct mxc_nand_devtype_data *) mxcnd_probe() 1487 if (host->devtype_data->needs_ip) { mxcnd_probe() 1489 host->regs_ip = devm_ioremap_resource(&pdev->dev, res); mxcnd_probe() 1490 if (IS_ERR(host->regs_ip)) mxcnd_probe() 1491 return PTR_ERR(host->regs_ip); mxcnd_probe() 1498 host->base = devm_ioremap_resource(&pdev->dev, res); mxcnd_probe() 1499 if (IS_ERR(host->base)) mxcnd_probe() 1500 return PTR_ERR(host->base); mxcnd_probe() 1502 host->main_area0 = host->base; mxcnd_probe() 1504 if (host->devtype_data->regs_offset) mxcnd_probe() 1505 host->regs = host->base + host->devtype_data->regs_offset; mxcnd_probe() 1506 host->spare0 = host->base + host->devtype_data->spare0_offset; mxcnd_probe() 1507 if (host->devtype_data->axi_offset) mxcnd_probe() 1508 host->regs_axi = host->base + host->devtype_data->axi_offset; mxcnd_probe() 1510 this->ecc.bytes = host->devtype_data->eccbytes; mxcnd_probe() 1511 host->eccsize = host->devtype_data->eccsize; mxcnd_probe() 1513 this->select_chip = host->devtype_data->select_chip; mxcnd_probe() 1515 this->ecc.layout = host->devtype_data->ecclayout_512; mxcnd_probe() 1517 if (host->pdata.hw_ecc) { mxcnd_probe() 1520 this->ecc.correct = host->devtype_data->correct_data; mxcnd_probe() 1527 if (host->pdata.width == 2) mxcnd_probe() 1530 if (host->pdata.flash_bbt) { mxcnd_probe() 1537 init_completion(&host->op_completion); mxcnd_probe() 1539 host->irq = platform_get_irq(pdev, 0); mxcnd_probe() 1540 if (host->irq < 0) mxcnd_probe() 1541 return host->irq; mxcnd_probe() 1544 * Use host->devtype_data->irq_control() here instead of irq_control() mxcnd_probe() 1548 host->devtype_data->irq_control(host, 0); mxcnd_probe() 1550 err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq, mxcnd_probe() 1551 0, DRIVER_NAME, host); mxcnd_probe() 1555 err = clk_prepare_enable(host->clk); mxcnd_probe() 1558 host->clk_act = 1; mxcnd_probe() 1565 if (host->devtype_data->irqpending_quirk) { mxcnd_probe() 1566 disable_irq_nosync(host->irq); mxcnd_probe() 1567 host->devtype_data->irq_control(host, 1); mxcnd_probe() 1571 if (nand_scan_ident(mtd, is_imx25_nfc(host) ? 4 : 1, NULL)) { mxcnd_probe() 1577 devm_kfree(&pdev->dev, (void *)host->data_buf); mxcnd_probe() 1578 host->data_buf = devm_kzalloc(&pdev->dev, mtd->writesize + mtd->oobsize, mxcnd_probe() 1580 if (!host->data_buf) { mxcnd_probe() 1586 host->devtype_data->preset(mtd); mxcnd_probe() 1589 this->ecc.layout = host->devtype_data->ecclayout_2k; mxcnd_probe() 1591 this->ecc.layout = host->devtype_data->ecclayout_4k; mxcnd_probe() 1594 if (is_imx21_nfc(host) || is_imx27_nfc(host)) mxcnd_probe() 1597 this->ecc.strength = (host->eccsize == 4) ? 4 : 8; mxcnd_probe() 1611 host->pdata.parts, mxcnd_probe() 1612 host->pdata.nr_parts); mxcnd_probe() 1614 platform_set_drvdata(pdev, host); mxcnd_probe() 1619 if (host->clk_act) mxcnd_probe() 1620 clk_disable_unprepare(host->clk); mxcnd_probe() 1627 struct mxc_nand_host *host = platform_get_drvdata(pdev); mxcnd_remove() local 1629 nand_release(&host->mtd); mxcnd_remove() 1630 if (host->clk_act) mxcnd_remove() 1631 clk_disable_unprepare(host->clk); mxcnd_remove()
|
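Several of the mxc_nand paths above funnel through mxc_do_addr_cycle(), which breaks the column and page address into byte-wide address cycles sent one at a time via send_addr(). Below is a stand-alone sketch of that decomposition for a large-page device; the fixed five-cycle layout and the masks are simplified assumptions (the real driver varies the cycle count with bus width, page size and chip size).

#include <stdint.h>
#include <stdio.h>

/* Emit the byte-wide address cycles for an assumed large-page NAND access:
 * two column cycles followed by three page/row cycles. */
static int addr_cycles(uint16_t column, uint32_t page, uint8_t out[5])
{
	int n = 0;

	out[n++] = column & 0xff;		/* column, low byte */
	out[n++] = (column >> 8) & 0xff;	/* column, high byte */
	out[n++] = page & 0xff;			/* page/row address, byte 0 */
	out[n++] = (page >> 8) & 0xff;		/* page/row address, byte 1 */
	out[n++] = (page >> 16) & 0xff;		/* page/row address, byte 2 */
	return n;
}

int main(void)
{
	uint8_t cyc[5];
	int n = addr_cycles(0x123, 0x45678, cyc);

	for (int i = 0; i < n; i++)
		printf("address cycle %d: 0x%02x\n", i, cyc[i]);
	return 0;
}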
H A D | socrates_nand.c | 49 struct socrates_nand_host *host = this->priv; socrates_nand_write_buf() local 52 out_be32(host->io_base, FPGA_NAND_ENABLE | socrates_nand_write_buf() 68 struct socrates_nand_host *host = this->priv; socrates_nand_read_buf() local 73 out_be32(host->io_base, val); socrates_nand_read_buf() 75 buf[i] = (in_be32(host->io_base) >> socrates_nand_read_buf() 109 struct socrates_nand_host *host = nand_chip->priv; socrates_nand_cmd_ctrl() local 125 out_be32(host->io_base, val); socrates_nand_cmd_ctrl() 134 struct socrates_nand_host *host = nand_chip->priv; socrates_nand_device_ready() local 136 if (in_be32(host->io_base) & FPGA_NAND_BUSY) socrates_nand_device_ready() 146 struct socrates_nand_host *host; socrates_nand_probe() local 153 host = devm_kzalloc(&ofdev->dev, sizeof(*host), GFP_KERNEL); socrates_nand_probe() 154 if (!host) socrates_nand_probe() 157 host->io_base = of_iomap(ofdev->dev.of_node, 0); socrates_nand_probe() 158 if (host->io_base == NULL) { socrates_nand_probe() 163 mtd = &host->mtd; socrates_nand_probe() 164 nand_chip = &host->nand_chip; socrates_nand_probe() 165 host->dev = &ofdev->dev; socrates_nand_probe() 167 nand_chip->priv = host; /* link the private data structures */ socrates_nand_probe() 190 dev_set_drvdata(&ofdev->dev, host); socrates_nand_probe() 211 iounmap(host->io_base); socrates_nand_probe() 220 struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev); socrates_nand_remove() local 221 struct mtd_info *mtd = &host->mtd; socrates_nand_remove() 225 iounmap(host->io_base); socrates_nand_remove()
|
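socrates_nand_read_buf() above fetches every byte through a single 32-bit FPGA register: it writes a control word, reads the register back and extracts the data from a shifted field. The sketch below models only that access pattern; the FAKE_NAND_ENABLE bit, the field position and the latched value are invented stand-ins, not the board's real register layout.

#include <stdint.h>
#include <stdio.h>

#define FAKE_NAND_ENABLE	(1u << 31)	/* assumed "drive the bus" control bit */
#define FAKE_DATA_SHIFT		16		/* assumed position of the data byte */

static uint32_t fake_io_reg;	/* stand-in for the memory-mapped FPGA register */

static void reg_write(uint32_t val)
{
	fake_io_reg = val;
}

static uint32_t reg_read(void)
{
	/* Pretend the FPGA has latched the byte 0xA5 for this cycle. */
	return fake_io_reg | (0xA5u << FAKE_DATA_SHIFT);
}

/* One register write + one register read per byte, as in the driver. */
static void read_buf(uint8_t *buf, int len)
{
	for (int i = 0; i < len; i++) {
		reg_write(FAKE_NAND_ENABLE);	/* start one byte-wide read cycle */
		buf[i] = (reg_read() >> FAKE_DATA_SHIFT) & 0xff;
	}
}

int main(void)
{
	uint8_t buf[4];

	read_buf(buf, sizeof(buf));
	printf("first byte read: 0x%02x\n", buf[0]);
	return 0;
}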
H A D | lpc32xx_slc.c | 222 static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host) lpc32xx_nand_setup() argument 227 writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base)); lpc32xx_nand_setup() 231 writel(0, SLC_CFG(host->io_base)); lpc32xx_nand_setup() 232 writel(0, SLC_IEN(host->io_base)); lpc32xx_nand_setup() 234 SLC_ICR(host->io_base)); lpc32xx_nand_setup() 237 clkrate = clk_get_rate(host->clk); lpc32xx_nand_setup() 242 tmp = SLCTAC_WDR(host->ncfg->wdr_clks) | lpc32xx_nand_setup() 243 SLCTAC_WWIDTH(1 + (clkrate / host->ncfg->wwidth)) | lpc32xx_nand_setup() 244 SLCTAC_WHOLD(1 + (clkrate / host->ncfg->whold)) | lpc32xx_nand_setup() 245 SLCTAC_WSETUP(1 + (clkrate / host->ncfg->wsetup)) | lpc32xx_nand_setup() 246 SLCTAC_RDR(host->ncfg->rdr_clks) | lpc32xx_nand_setup() 247 SLCTAC_RWIDTH(1 + (clkrate / host->ncfg->rwidth)) | lpc32xx_nand_setup() 248 SLCTAC_RHOLD(1 + (clkrate / host->ncfg->rhold)) | lpc32xx_nand_setup() 249 SLCTAC_RSETUP(1 + (clkrate / host->ncfg->rsetup)); lpc32xx_nand_setup() 250 writel(tmp, SLC_TAC(host->io_base)); lpc32xx_nand_setup() 261 struct lpc32xx_nand_host *host = chip->priv; lpc32xx_nand_cmd_ctrl() local 264 tmp = readl(SLC_CFG(host->io_base)); lpc32xx_nand_cmd_ctrl() 269 writel(tmp, SLC_CFG(host->io_base)); lpc32xx_nand_cmd_ctrl() 273 writel(cmd, SLC_CMD(host->io_base)); lpc32xx_nand_cmd_ctrl() 275 writel(cmd, SLC_ADDR(host->io_base)); lpc32xx_nand_cmd_ctrl() 285 struct lpc32xx_nand_host *host = chip->priv; lpc32xx_nand_device_ready() local 288 if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0) lpc32xx_nand_device_ready() 297 static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host) lpc32xx_wp_enable() argument 299 if (gpio_is_valid(host->ncfg->wp_gpio)) lpc32xx_wp_enable() 300 gpio_set_value(host->ncfg->wp_gpio, 0); lpc32xx_wp_enable() 306 static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host) lpc32xx_wp_disable() argument 308 if (gpio_is_valid(host->ncfg->wp_gpio)) lpc32xx_wp_disable() 309 gpio_set_value(host->ncfg->wp_gpio, 1); lpc32xx_wp_disable() 340 struct lpc32xx_nand_host *host = chip->priv; lpc32xx_nand_read_byte() local 342 return (uint8_t)readl(SLC_DATA(host->io_base)); lpc32xx_nand_read_byte() 351 struct lpc32xx_nand_host *host = chip->priv; lpc32xx_nand_read_buf() local 355 *buf++ = (uint8_t)readl(SLC_DATA(host->io_base)); lpc32xx_nand_read_buf() 364 struct lpc32xx_nand_host *host = chip->priv; lpc32xx_nand_write_buf() local 368 writel((uint32_t)*buf++, SLC_DATA(host->io_base)); lpc32xx_nand_write_buf() 429 struct lpc32xx_nand_host *host = chip->priv; lpc32xx_xmit_dma() local 434 host->dma_slave_config.direction = dir; lpc32xx_xmit_dma() 435 host->dma_slave_config.src_addr = dma; lpc32xx_xmit_dma() 436 host->dma_slave_config.dst_addr = dma; lpc32xx_xmit_dma() 437 host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; lpc32xx_xmit_dma() 438 host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; lpc32xx_xmit_dma() 439 host->dma_slave_config.src_maxburst = 4; lpc32xx_xmit_dma() 440 host->dma_slave_config.dst_maxburst = 4; lpc32xx_xmit_dma() 442 host->dma_slave_config.device_fc = false; lpc32xx_xmit_dma() 443 if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) { lpc32xx_xmit_dma() 448 sg_init_one(&host->sgl, mem, len); lpc32xx_xmit_dma() 450 res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1, lpc32xx_xmit_dma() 456 desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir, lpc32xx_xmit_dma() 463 init_completion(&host->comp); lpc32xx_xmit_dma() 465 desc->callback_param 
= &host->comp; lpc32xx_xmit_dma() 468 dma_async_issue_pending(host->dma_chan); lpc32xx_xmit_dma() 470 wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000)); lpc32xx_xmit_dma() 472 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1, lpc32xx_xmit_dma() 477 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1, lpc32xx_xmit_dma() 489 struct lpc32xx_nand_host *host = chip->priv; lpc32xx_xfer() local 502 dma_buf = host->data_buf; lpc32xx_xfer() 505 memcpy(host->data_buf, buf, mtd->writesize); lpc32xx_xfer() 509 writel(readl(SLC_CFG(host->io_base)) | lpc32xx_xfer() 511 SLCCFG_DMA_BURST, SLC_CFG(host->io_base)); lpc32xx_xfer() 513 writel((readl(SLC_CFG(host->io_base)) | lpc32xx_xfer() 516 SLC_CFG(host->io_base)); lpc32xx_xfer() 520 writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base)); lpc32xx_xfer() 523 writel(mtd->writesize, SLC_TC(host->io_base)); lpc32xx_xfer() 526 writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START, lpc32xx_xfer() 527 SLC_CTRL(host->io_base)); lpc32xx_xfer() 531 res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma), lpc32xx_xfer() 542 res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma), lpc32xx_xfer() 543 &host->ecc_buf[i], 4, DMA_DEV_TO_MEM); lpc32xx_xfer() 555 if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) { lpc32xx_xfer() 558 while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) && lpc32xx_xfer() 570 host->ecc_buf[chip->ecc.steps - 1] = lpc32xx_xfer() 571 readl(SLC_ECC(host->io_base)); lpc32xx_xfer() 574 dmaengine_terminate_all(host->dma_chan); lpc32xx_xfer() 576 if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO || lpc32xx_xfer() 577 readl(SLC_TC(host->io_base))) { lpc32xx_xfer() 584 writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START, lpc32xx_xfer() 585 SLC_CTRL(host->io_base)); lpc32xx_xfer() 586 writel(readl(SLC_CFG(host->io_base)) & lpc32xx_xfer() 588 SLCCFG_DMA_BURST), SLC_CFG(host->io_base)); lpc32xx_xfer() 591 memcpy(buf, host->data_buf, mtd->writesize); lpc32xx_xfer() 604 struct lpc32xx_nand_host *host = chip->priv; lpc32xx_nand_read_page_syndrome() local 618 lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps); lpc32xx_nand_read_page_syndrome() 665 struct lpc32xx_nand_host *host = chip->priv; lpc32xx_nand_write_page_syndrome() local 678 lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps); lpc32xx_nand_write_page_syndrome() 700 static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host) lpc32xx_nand_dma_setup() argument 702 struct mtd_info *mtd = &host->mtd; lpc32xx_nand_dma_setup() 705 if (!host->pdata || !host->pdata->dma_filter) { lpc32xx_nand_dma_setup() 712 host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter, lpc32xx_nand_dma_setup() 714 if (!host->dma_chan) { lpc32xx_nand_dma_setup() 758 struct lpc32xx_nand_host *host; lpc32xx_nand_probe() local 772 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); lpc32xx_nand_probe() 773 if (!host) lpc32xx_nand_probe() 775 host->io_base_dma = rc->start; lpc32xx_nand_probe() 777 host->io_base = devm_ioremap_resource(&pdev->dev, rc); lpc32xx_nand_probe() 778 if (IS_ERR(host->io_base)) lpc32xx_nand_probe() 779 return PTR_ERR(host->io_base); lpc32xx_nand_probe() 782 host->ncfg = lpc32xx_parse_dt(&pdev->dev); lpc32xx_nand_probe() 783 if (!host->ncfg) { lpc32xx_nand_probe() 788 if (host->ncfg->wp_gpio == -EPROBE_DEFER) lpc32xx_nand_probe() 790 if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev, lpc32xx_nand_probe() 791 host->ncfg->wp_gpio, "NAND WP")) { lpc32xx_nand_probe() 795 
lpc32xx_wp_disable(host); lpc32xx_nand_probe() 797 host->pdata = dev_get_platdata(&pdev->dev); lpc32xx_nand_probe() 799 mtd = &host->mtd; lpc32xx_nand_probe() 800 chip = &host->nand_chip; lpc32xx_nand_probe() 801 chip->priv = host; lpc32xx_nand_probe() 807 host->clk = devm_clk_get(&pdev->dev, NULL); lpc32xx_nand_probe() 808 if (IS_ERR(host->clk)) { lpc32xx_nand_probe() 813 clk_enable(host->clk); lpc32xx_nand_probe() 816 chip->IO_ADDR_R = SLC_DATA(host->io_base); lpc32xx_nand_probe() 817 chip->IO_ADDR_W = SLC_DATA(host->io_base); lpc32xx_nand_probe() 823 lpc32xx_nand_setup(host); lpc32xx_nand_probe() 825 platform_set_drvdata(pdev, host); lpc32xx_nand_probe() 847 host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE; lpc32xx_nand_probe() 848 host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len, lpc32xx_nand_probe() 850 if (host->data_buf == NULL) { lpc32xx_nand_probe() 855 res = lpc32xx_nand_dma_setup(host); lpc32xx_nand_probe() 868 host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE); lpc32xx_nand_probe() 884 if (host->ncfg->use_bbt) { lpc32xx_nand_probe() 908 res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts, lpc32xx_nand_probe() 909 host->ncfg->num_parts); lpc32xx_nand_probe() 916 dma_release_channel(host->dma_chan); lpc32xx_nand_probe() 918 clk_disable(host->clk); lpc32xx_nand_probe() 920 lpc32xx_wp_enable(host); lpc32xx_nand_probe() 931 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); lpc32xx_nand_remove() local 932 struct mtd_info *mtd = &host->mtd; lpc32xx_nand_remove() 935 dma_release_channel(host->dma_chan); lpc32xx_nand_remove() 938 tmp = readl(SLC_CTRL(host->io_base)); lpc32xx_nand_remove() 940 writel(tmp, SLC_CTRL(host->io_base)); lpc32xx_nand_remove() 942 clk_disable(host->clk); lpc32xx_nand_remove() 943 lpc32xx_wp_enable(host); lpc32xx_nand_remove() 951 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); lpc32xx_nand_resume() local 954 clk_enable(host->clk); lpc32xx_nand_resume() 957 lpc32xx_nand_setup(host); lpc32xx_nand_resume() 960 lpc32xx_wp_disable(host); lpc32xx_nand_resume() 968 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); lpc32xx_nand_suspend() local 971 tmp = readl(SLC_CTRL(host->io_base)); lpc32xx_nand_suspend() 973 writel(tmp, SLC_CTRL(host->io_base)); lpc32xx_nand_suspend() 976 lpc32xx_wp_enable(host); lpc32xx_nand_suspend() 979 clk_disable(host->clk); lpc32xx_nand_suspend()
|
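In the SLC read/write page paths above, the hardware leaves one 32-bit ECC word per subpage in host->ecc_buf and lpc32xx_slc_ecc_copy() repacks those words into the three ECC bytes per step stored in OOB. The sketch below shows only that repacking shape; the byte order chosen here is an assumption for the example rather than the SLC's documented layout.

#include <stdint.h>
#include <stdio.h>

/* Repack one 32-bit hardware ECC word per subpage into 3 OOB bytes per step. */
static void ecc_words_to_oob(uint8_t *oob, const uint32_t *ecc_words, int steps)
{
	for (int i = 0; i < steps; i++) {
		uint32_t w = ecc_words[i];	/* one ECC word per subpage */

		oob[i * 3 + 0] = w & 0xff;		/* assumed byte order */
		oob[i * 3 + 1] = (w >> 8) & 0xff;
		oob[i * 3 + 2] = (w >> 16) & 0xff;
	}
}

int main(void)
{
	uint32_t words[2] = { 0x00a1b2c3, 0x00112233 };
	uint8_t oob[6];

	ecc_words_to_oob(oob, words, 2);
	for (int i = 0; i < 6; i++)
		printf("%02x ", oob[i]);
	printf("\n");
	return 0;
}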
H A D | fsmc_nand.c | 333 struct fsmc_nand_data *host; fsmc_select_chip() local 335 host = container_of(mtd, struct fsmc_nand_data, mtd); fsmc_select_chip() 345 if (host->select_chip) fsmc_select_chip() 346 host->select_chip(chipnr, fsmc_select_chip() 362 struct fsmc_nand_data *host = container_of(mtd, fsmc_cmd_ctrl() local 364 void __iomem *regs = host->regs_va; fsmc_cmd_ctrl() 365 unsigned int bank = host->bank; fsmc_cmd_ctrl() 371 this->IO_ADDR_R = host->cmd_va; fsmc_cmd_ctrl() 372 this->IO_ADDR_W = host->cmd_va; fsmc_cmd_ctrl() 374 this->IO_ADDR_R = host->addr_va; fsmc_cmd_ctrl() 375 this->IO_ADDR_W = host->addr_va; fsmc_cmd_ctrl() 377 this->IO_ADDR_R = host->data_va; fsmc_cmd_ctrl() 378 this->IO_ADDR_W = host->data_va; fsmc_cmd_ctrl() 448 struct fsmc_nand_data *host = container_of(mtd, fsmc_enable_hwecc() local 450 void __iomem *regs = host->regs_va; fsmc_enable_hwecc() 451 uint32_t bank = host->bank; fsmc_enable_hwecc() 469 struct fsmc_nand_data *host = container_of(mtd, fsmc_read_hwecc_ecc4() local 471 void __iomem *regs = host->regs_va; fsmc_read_hwecc_ecc4() 472 uint32_t bank = host->bank; fsmc_read_hwecc_ecc4() 484 dev_err(host->dev, "calculate ecc timed out\n"); fsmc_read_hwecc_ecc4() 520 struct fsmc_nand_data *host = container_of(mtd, fsmc_read_hwecc_ecc1() local 522 void __iomem *regs = host->regs_va; fsmc_read_hwecc_ecc1() 523 uint32_t bank = host->bank; fsmc_read_hwecc_ecc1() 550 struct fsmc_nand_data *host = param; dma_complete() local 552 complete(&host->dma_access_complete); dma_complete() 555 static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, dma_xfer() argument 567 chan = host->write_dma_chan; dma_xfer() 569 chan = host->read_dma_chan; dma_xfer() 578 dma_dst = host->data_pa; dma_xfer() 580 dma_src = host->data_pa; dma_xfer() 587 dev_err(host->dev, "device_prep_dma_memcpy error\n"); dma_xfer() 593 tx->callback_param = host; dma_xfer() 598 dev_err(host->dev, "dma_submit_error %d\n", cookie); dma_xfer() 605 wait_for_completion_timeout(&host->dma_access_complete, dma_xfer() 609 dev_err(host->dev, "wait_for_completion_timeout\n"); dma_xfer() 677 struct fsmc_nand_data *host; fsmc_read_buf_dma() local 679 host = container_of(mtd, struct fsmc_nand_data, mtd); fsmc_read_buf_dma() 680 dma_xfer(host, buf, len, DMA_FROM_DEVICE); fsmc_read_buf_dma() 692 struct fsmc_nand_data *host; fsmc_write_buf_dma() local 694 host = container_of(mtd, struct fsmc_nand_data, mtd); fsmc_write_buf_dma() 695 dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE); fsmc_write_buf_dma() 715 struct fsmc_nand_data *host = container_of(mtd, fsmc_read_page_hwecc() local 717 struct fsmc_eccplace *ecc_place = host->ecc_place; fsmc_read_page_hwecc() 785 struct fsmc_nand_data *host = container_of(mtd, fsmc_bch8_correct_data() local 788 void __iomem *regs = host->regs_va; fsmc_bch8_correct_data() 789 unsigned int bank = host->bank; fsmc_bch8_correct_data() 930 struct fsmc_nand_data *host; fsmc_nand_probe() local 955 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); fsmc_nand_probe() 956 if (!host) fsmc_nand_probe() 960 host->data_va = devm_ioremap_resource(&pdev->dev, res); fsmc_nand_probe() 961 if (IS_ERR(host->data_va)) fsmc_nand_probe() 962 return PTR_ERR(host->data_va); fsmc_nand_probe() 964 host->data_pa = (dma_addr_t)res->start; fsmc_nand_probe() 967 host->addr_va = devm_ioremap_resource(&pdev->dev, res); fsmc_nand_probe() 968 if (IS_ERR(host->addr_va)) fsmc_nand_probe() 969 return PTR_ERR(host->addr_va); fsmc_nand_probe() 972 host->cmd_va = devm_ioremap_resource(&pdev->dev, res); 
fsmc_nand_probe() 973 if (IS_ERR(host->cmd_va)) fsmc_nand_probe() 974 return PTR_ERR(host->cmd_va); fsmc_nand_probe() 977 host->regs_va = devm_ioremap_resource(&pdev->dev, res); fsmc_nand_probe() 978 if (IS_ERR(host->regs_va)) fsmc_nand_probe() 979 return PTR_ERR(host->regs_va); fsmc_nand_probe() 981 host->clk = clk_get(&pdev->dev, NULL); fsmc_nand_probe() 982 if (IS_ERR(host->clk)) { fsmc_nand_probe() 984 return PTR_ERR(host->clk); fsmc_nand_probe() 987 ret = clk_prepare_enable(host->clk); fsmc_nand_probe() 996 pid |= (readl(host->regs_va + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8); fsmc_nand_probe() 997 host->pid = pid; fsmc_nand_probe() 1003 host->bank = pdata->bank; fsmc_nand_probe() 1004 host->select_chip = pdata->select_bank; fsmc_nand_probe() 1005 host->partitions = pdata->partitions; fsmc_nand_probe() 1006 host->nr_partitions = pdata->nr_partitions; fsmc_nand_probe() 1007 host->dev = &pdev->dev; fsmc_nand_probe() 1008 host->dev_timings = pdata->nand_timings; fsmc_nand_probe() 1009 host->mode = pdata->mode; fsmc_nand_probe() 1011 if (host->mode == USE_DMA_ACCESS) fsmc_nand_probe() 1012 init_completion(&host->dma_access_complete); fsmc_nand_probe() 1015 mtd = &host->mtd; fsmc_nand_probe() 1016 nand = &host->nand; fsmc_nand_probe() 1018 nand->priv = host; fsmc_nand_probe() 1020 host->mtd.owner = THIS_MODULE; fsmc_nand_probe() 1021 nand->IO_ADDR_R = host->data_va; fsmc_nand_probe() 1022 nand->IO_ADDR_W = host->data_va; fsmc_nand_probe() 1036 switch (host->mode) { fsmc_nand_probe() 1040 host->read_dma_chan = dma_request_channel(mask, filter, fsmc_nand_probe() 1042 if (!host->read_dma_chan) { fsmc_nand_probe() 1046 host->write_dma_chan = dma_request_channel(mask, filter, fsmc_nand_probe() 1048 if (!host->write_dma_chan) { fsmc_nand_probe() 1063 fsmc_nand_setup(host->regs_va, host->bank, fsmc_nand_probe() 1065 host->dev_timings); fsmc_nand_probe() 1067 if (AMBA_REV_BITS(host->pid) >= 8) { fsmc_nand_probe() 1083 if (nand_scan_ident(&host->mtd, 1, NULL)) { fsmc_nand_probe() 1089 if (AMBA_REV_BITS(host->pid) >= 8) { fsmc_nand_probe() 1090 switch (host->mtd.oobsize) { fsmc_nand_probe() 1093 host->ecc_place = &fsmc_ecc4_sp_place; fsmc_nand_probe() 1097 host->ecc_place = &fsmc_ecc4_lp_place; fsmc_nand_probe() 1101 host->ecc_place = &fsmc_ecc4_lp_place; fsmc_nand_probe() 1105 host->ecc_place = &fsmc_ecc4_lp_place; fsmc_nand_probe() 1109 host->ecc_place = &fsmc_ecc4_lp_place; fsmc_nand_probe() 1117 switch (host->mtd.oobsize) { fsmc_nand_probe() 1135 if (nand_scan_tail(&host->mtd)) { fsmc_nand_probe() 1150 host->mtd.name = "nand"; fsmc_nand_probe() 1152 ret = mtd_device_parse_register(&host->mtd, NULL, &ppdata, fsmc_nand_probe() 1153 host->partitions, host->nr_partitions); fsmc_nand_probe() 1157 platform_set_drvdata(pdev, host); fsmc_nand_probe() 1163 if (host->mode == USE_DMA_ACCESS) fsmc_nand_probe() 1164 dma_release_channel(host->write_dma_chan); fsmc_nand_probe() 1166 if (host->mode == USE_DMA_ACCESS) fsmc_nand_probe() 1167 dma_release_channel(host->read_dma_chan); fsmc_nand_probe() 1169 clk_disable_unprepare(host->clk); fsmc_nand_probe() 1171 clk_put(host->clk); fsmc_nand_probe() 1180 struct fsmc_nand_data *host = platform_get_drvdata(pdev); fsmc_nand_remove() local 1182 if (host) { fsmc_nand_remove() 1183 nand_release(&host->mtd); fsmc_nand_remove() 1185 if (host->mode == USE_DMA_ACCESS) { fsmc_nand_remove() 1186 dma_release_channel(host->write_dma_chan); fsmc_nand_remove() 1187 dma_release_channel(host->read_dma_chan); fsmc_nand_remove() 1189 clk_disable_unprepare(host->clk); 
fsmc_nand_remove() 1190 clk_put(host->clk); fsmc_nand_remove() 1199 struct fsmc_nand_data *host = dev_get_drvdata(dev); fsmc_nand_suspend() local 1200 if (host) fsmc_nand_suspend() 1201 clk_disable_unprepare(host->clk); fsmc_nand_suspend() 1207 struct fsmc_nand_data *host = dev_get_drvdata(dev); fsmc_nand_resume() local 1208 if (host) { fsmc_nand_resume() 1209 clk_prepare_enable(host->clk); fsmc_nand_resume() 1210 fsmc_nand_setup(host->regs_va, host->bank, fsmc_nand_resume() 1211 host->nand.options & NAND_BUSWIDTH_16, fsmc_nand_resume() 1212 host->dev_timings); fsmc_nand_resume()
|
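The fsmc_nand.c excerpt above repeatedly recovers the driver's per-device state from an embedded struct mtd_info with container_of(mtd, struct fsmc_nand_data, mtd). A minimal, self-contained sketch of that embed-and-recover pattern; the types and field names below are invented for illustration, not the driver's:

#include <stdio.h>
#include <stddef.h>

/* Local stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct mtd_info {            /* stand-in for the MTD core object       */
        const char *name;
};

struct fsmc_nand_data {      /* driver state embedding the MTD object  */
        int bank;
        struct mtd_info mtd;
};

/* Callback that only receives the embedded object, as the MTD core does. */
static void select_chip(struct mtd_info *mtd, int chipnr)
{
        struct fsmc_nand_data *host =
                container_of(mtd, struct fsmc_nand_data, mtd);

        printf("select chip %d on bank %d (%s)\n", chipnr, host->bank, mtd->name);
}

int main(void)
{
        struct fsmc_nand_data host = { .bank = 3, .mtd = { .name = "nand" } };

        select_chip(&host.mtd, 0);   /* recovers &host from &host.mtd */
        return 0;
}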
/linux-4.1.27/drivers/gpu/host1x/hw/ |
H A D | intr_hw.c | 34 struct host1x *host = syncpt->host; host1x_intr_syncpt_handle() local 36 host1x_sync_writel(host, BIT_MASK(id), host1x_intr_syncpt_handle() 38 host1x_sync_writel(host, BIT_MASK(id), host1x_intr_syncpt_handle() 41 queue_work(host->intr_wq, &syncpt->intr.work); host1x_intr_syncpt_handle() 46 struct host1x *host = dev_id; syncpt_thresh_isr() local 50 for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) { syncpt_thresh_isr() 51 reg = host1x_sync_readl(host, syncpt_thresh_isr() 55 host->syncpt + (i * BITS_PER_LONG + id); syncpt_thresh_isr() 63 static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host) _host1x_intr_disable_all_syncpt_intrs() argument 67 for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) { _host1x_intr_disable_all_syncpt_intrs() 68 host1x_sync_writel(host, 0xffffffffu, _host1x_intr_disable_all_syncpt_intrs() 70 host1x_sync_writel(host, 0xffffffffu, _host1x_intr_disable_all_syncpt_intrs() 75 static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm, _host1x_intr_init_host_sync() argument 80 host1x_hw_intr_disable_all_syncpt_intrs(host); _host1x_intr_init_host_sync() 82 for (i = 0; i < host->info->nb_pts; i++) _host1x_intr_init_host_sync() 83 INIT_WORK(&host->syncpt[i].intr.work, syncpt_thresh_work); _host1x_intr_init_host_sync() 85 err = devm_request_irq(host->dev, host->intr_syncpt_irq, _host1x_intr_init_host_sync() 87 "host1x_syncpt", host); _host1x_intr_init_host_sync() 94 host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT); _host1x_intr_init_host_sync() 100 host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG); _host1x_intr_init_host_sync() 102 /* update host clocks per usec */ _host1x_intr_init_host_sync() 103 host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK); _host1x_intr_init_host_sync() 108 static void _host1x_intr_set_syncpt_threshold(struct host1x *host, _host1x_intr_set_syncpt_threshold() argument 111 host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id)); _host1x_intr_set_syncpt_threshold() 114 static void _host1x_intr_enable_syncpt_intr(struct host1x *host, u32 id) _host1x_intr_enable_syncpt_intr() argument 116 host1x_sync_writel(host, BIT_MASK(id), _host1x_intr_enable_syncpt_intr() 120 static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id) _host1x_intr_disable_syncpt_intr() argument 122 host1x_sync_writel(host, BIT_MASK(id), _host1x_intr_disable_syncpt_intr() 124 host1x_sync_writel(host, BIT_MASK(id), _host1x_intr_disable_syncpt_intr() 128 static int _host1x_free_syncpt_irq(struct host1x *host) _host1x_free_syncpt_irq() argument 130 devm_free_irq(host->dev, host->intr_syncpt_irq, host); _host1x_free_syncpt_irq() 131 flush_workqueue(host->intr_wq); _host1x_free_syncpt_irq()
|
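syncpt_thresh_isr() above walks one 32-bit status word per group of sync points and converts a (word index, bit) pair back into a sync point index. A small user-space model of that pending-bitmask walk; the "registers" are faked with an array and all names are illustrative:

#include <stdio.h>
#include <stdint.h>

#define NB_PTS   96                        /* pretend hardware has 96 sync points */
#define NB_WORDS ((NB_PTS + 31) / 32)      /* DIV_ROUND_UP(NB_PTS, 32)            */

/* Fake "interrupt status" registers, one 32-bit word per 32 sync points. */
static uint32_t intstatus[NB_WORDS] = { 0x00000005, 0x80000000, 0x00010000 };

static void handle_syncpt(unsigned int id)
{
        printf("sync point %u crossed its threshold\n", id);
}

int main(void)
{
        for (unsigned int i = 0; i < NB_WORDS; i++) {
                uint32_t reg = intstatus[i];

                while (reg) {
                        unsigned int bit = __builtin_ctz(reg); /* lowest set bit */

                        handle_syncpt(i * 32 + bit);
                        reg &= reg - 1;                        /* clear that bit */
                }
        }
        return 0;
}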
H A D | syncpt_hw.c | 29 struct host1x *host = sp->host; syncpt_restore() local 31 host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id)); syncpt_restore() 39 struct host1x *host = sp->host; syncpt_restore_wait_base() local 40 host1x_sync_writel(host, sp->base_val, syncpt_restore_wait_base() 49 struct host1x *host = sp->host; syncpt_read_wait_base() local 51 host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id)); syncpt_read_wait_base() 59 struct host1x *host = sp->host; syncpt_load() local 65 live = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT(sp->id)); syncpt_load() 69 dev_err(host->dev, "%s failed: id=%u, min=%d, max=%d\n", syncpt_load() 82 struct host1x *host = sp->host; syncpt_cpu_incr() local 88 host1x_sync_writel(host, BIT_MASK(sp->id), syncpt_cpu_incr()
|
H A D | host1x01.c | 32 int host1x01_init(struct host1x *host) host1x01_init() argument 34 host->channel_op = &host1x_channel_ops; host1x01_init() 35 host->cdma_op = &host1x_cdma_ops; host1x01_init() 36 host->cdma_pb_op = &host1x_pushbuffer_ops; host1x01_init() 37 host->syncpt_op = &host1x_syncpt_ops; host1x01_init() 38 host->intr_op = &host1x_intr_ops; host1x01_init() 39 host->debug_op = &host1x_debug_ops; host1x01_init()
|
H A D | host1x02.c | 32 int host1x02_init(struct host1x *host) host1x02_init() argument 34 host->channel_op = &host1x_channel_ops; host1x02_init() 35 host->cdma_op = &host1x_cdma_ops; host1x02_init() 36 host->cdma_pb_op = &host1x_pushbuffer_ops; host1x02_init() 37 host->syncpt_op = &host1x_syncpt_ops; host1x02_init() 38 host->intr_op = &host1x_intr_ops; host1x02_init() 39 host->debug_op = &host1x_debug_ops; host1x02_init()
|
H A D | host1x04.c | 32 int host1x04_init(struct host1x *host) host1x04_init() argument 34 host->channel_op = &host1x_channel_ops; host1x04_init() 35 host->cdma_op = &host1x_cdma_ops; host1x04_init() 36 host->cdma_pb_op = &host1x_pushbuffer_ops; host1x04_init() 37 host->syncpt_op = &host1x_syncpt_ops; host1x04_init() 38 host->intr_op = &host1x_intr_ops; host1x04_init() 39 host->debug_op = &host1x_debug_ops; host1x04_init()
|
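host1x01_init(), host1x02_init() and host1x04_init() above all do the same thing: install a set of per-generation function-pointer tables into struct host1x, so the core can dispatch through host->*_op without knowing the hardware revision. A compact stand-alone model of that ops-table pattern; the structures and names below are invented for illustration:

#include <stdio.h>

struct host;

struct syncpt_ops {
        unsigned int (*load)(struct host *h, unsigned int id);
};

struct host {
        const struct syncpt_ops *syncpt_op;  /* filled in by the per-version init */
        unsigned int counters[4];
};

/* "Generation 1" implementation. */
static unsigned int gen1_load(struct host *h, unsigned int id)
{
        return h->counters[id];
}

static const struct syncpt_ops gen1_syncpt_ops = { .load = gen1_load };

static int host_gen1_init(struct host *h)
{
        h->syncpt_op = &gen1_syncpt_ops;     /* like host->syncpt_op = &host1x_syncpt_ops */
        return 0;
}

int main(void)
{
        struct host h = { .counters = { 7, 0, 0, 0 } };

        host_gen1_init(&h);
        printf("syncpt 0 = %u\n", h.syncpt_op->load(&h, 0));
        return 0;
}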
H A D | debug_hw.c | 178 static void host1x_debug_show_channel_cdma(struct host1x *host, host1x_debug_show_channel_cdma() argument 190 cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id)); host1x_debug_show_channel_cdma() 191 cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id)); host1x_debug_show_channel_cdma() 213 host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base)); host1x_debug_show_channel_cdma() 232 static void host1x_debug_show_channel_fifo(struct host1x *host, host1x_debug_show_channel_fifo() argument 248 host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL); host1x_debug_show_channel_fifo() 249 host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) | host1x_debug_show_channel_fifo() 253 val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_PTRS); host1x_debug_show_channel_fifo() 257 val = host1x_sync_readl(host, HOST1X_SYNC_CF_SETUP(ch->id)); host1x_debug_show_channel_fifo() 262 host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL); host1x_debug_show_channel_fifo() 263 host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) | host1x_debug_show_channel_fifo() 267 val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ); host1x_debug_show_channel_fifo() 288 host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL); host1x_debug_show_channel_fifo() 291 static void host1x_debug_show_mlocks(struct host1x *host, struct output *o) host1x_debug_show_mlocks() argument 296 for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) { host1x_debug_show_mlocks() 298 host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i)); host1x_debug_show_mlocks()
|
/linux-4.1.27/fs/lockd/ |
H A D | host.c | 2 * linux/fs/lockd/host.c 36 #define for_each_host(host, chain, table) \ 39 hlist_for_each_entry((host), (chain), h_hash) 41 #define for_each_host_safe(host, next, chain, table) \ 44 hlist_for_each_entry_safe((host), (next), \ 112 struct nlm_host *host = NULL; nlm_alloc_host() local 118 host = NULL; nlm_alloc_host() 128 host = kmalloc(sizeof(*host), GFP_KERNEL); nlm_alloc_host() 129 if (unlikely(host == NULL)) { nlm_alloc_host() 135 memcpy(nlm_addr(host), ni->sap, ni->salen); nlm_alloc_host() 136 host->h_addrlen = ni->salen; nlm_alloc_host() 137 rpc_set_port(nlm_addr(host), 0); nlm_alloc_host() 138 host->h_srcaddrlen = 0; nlm_alloc_host() 140 host->h_rpcclnt = NULL; nlm_alloc_host() 141 host->h_name = nsm->sm_name; nlm_alloc_host() 142 host->h_version = ni->version; nlm_alloc_host() 143 host->h_proto = ni->protocol; nlm_alloc_host() 144 host->h_reclaiming = 0; nlm_alloc_host() 145 host->h_server = ni->server; nlm_alloc_host() 146 host->h_noresvport = ni->noresvport; nlm_alloc_host() 147 host->h_inuse = 0; nlm_alloc_host() 148 init_waitqueue_head(&host->h_gracewait); nlm_alloc_host() 149 init_rwsem(&host->h_rwsem); nlm_alloc_host() 150 host->h_state = 0; nlm_alloc_host() 151 host->h_nsmstate = 0; nlm_alloc_host() 152 host->h_pidcount = 0; nlm_alloc_host() 153 atomic_set(&host->h_count, 1); nlm_alloc_host() 154 mutex_init(&host->h_mutex); nlm_alloc_host() 155 host->h_nextrebind = now + NLM_HOST_REBIND; nlm_alloc_host() 156 host->h_expires = now + NLM_HOST_EXPIRE; nlm_alloc_host() 157 INIT_LIST_HEAD(&host->h_lockowners); nlm_alloc_host() 158 spin_lock_init(&host->h_lock); nlm_alloc_host() 159 INIT_LIST_HEAD(&host->h_granted); nlm_alloc_host() 160 INIT_LIST_HEAD(&host->h_reclaim); nlm_alloc_host() 161 host->h_nsmhandle = nsm; nlm_alloc_host() 162 host->h_addrbuf = nsm->sm_addrbuf; nlm_alloc_host() 163 host->net = ni->net; nlm_alloc_host() 166 return host; nlm_alloc_host() 174 static void nlm_destroy_host_locked(struct nlm_host *host) nlm_destroy_host_locked() argument 177 struct lockd_net *ln = net_generic(host->net, lockd_net_id); nlm_destroy_host_locked() 179 dprintk("lockd: destroy host %s\n", host->h_name); nlm_destroy_host_locked() 181 hlist_del_init(&host->h_hash); nlm_destroy_host_locked() 183 nsm_unmonitor(host); nlm_destroy_host_locked() 184 nsm_release(host->h_nsmhandle); nlm_destroy_host_locked() 186 clnt = host->h_rpcclnt; nlm_destroy_host_locked() 189 kfree(host); nlm_destroy_host_locked() 196 * nlmclnt_lookup_host - Find an NLM host handle matching a remote server 206 * If one doesn't already exist in the host cache, a new handle is 229 struct nlm_host *host; nlmclnt_lookup_host() local 233 dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__, nlmclnt_lookup_host() 240 hlist_for_each_entry(host, chain, h_hash) { hlist_for_each_entry() 241 if (host->net != net) hlist_for_each_entry() 243 if (!rpc_cmp_addr(nlm_addr(host), sap)) hlist_for_each_entry() 248 nsm = host->h_nsmhandle; hlist_for_each_entry() 250 if (host->h_proto != protocol) hlist_for_each_entry() 252 if (host->h_version != version) hlist_for_each_entry() 255 nlm_get_host(host); hlist_for_each_entry() 256 dprintk("lockd: %s found host %s (%s)\n", __func__, hlist_for_each_entry() 257 host->h_name, host->h_addrbuf); hlist_for_each_entry() 261 host = nlm_alloc_host(&ni, nsm); 262 if (unlikely(host == NULL)) 265 hlist_add_head(&host->h_hash, chain); 269 dprintk("lockd: %s created host %s (%s)\n", __func__, 270 host->h_name, host->h_addrbuf); 274 return host; 279 * @host: nlm_host to release 282 void 
nlmclnt_release_host(struct nlm_host *host) nlmclnt_release_host() argument 284 if (host == NULL) nlmclnt_release_host() 287 dprintk("lockd: release client host %s\n", host->h_name); nlmclnt_release_host() 289 WARN_ON_ONCE(host->h_server); nlmclnt_release_host() 291 if (atomic_dec_and_test(&host->h_count)) { nlmclnt_release_host() 292 WARN_ON_ONCE(!list_empty(&host->h_lockowners)); nlmclnt_release_host() 293 WARN_ON_ONCE(!list_empty(&host->h_granted)); nlmclnt_release_host() 294 WARN_ON_ONCE(!list_empty(&host->h_reclaim)); nlmclnt_release_host() 297 nlm_destroy_host_locked(host); nlmclnt_release_host() 303 * nlmsvc_lookup_host - Find an NLM host handle matching a remote client 305 * @hostname: name of client host 310 * NLM request. If one doesn't already exist in the host cache, a 325 struct nlm_host *host = NULL; nlmsvc_lookup_host() local 342 dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__, nlmsvc_lookup_host() 352 hlist_for_each_entry(host, chain, h_hash) { hlist_for_each_entry() 353 if (host->net != net) hlist_for_each_entry() 355 if (!rpc_cmp_addr(nlm_addr(host), ni.sap)) hlist_for_each_entry() 360 nsm = host->h_nsmhandle; hlist_for_each_entry() 362 if (host->h_proto != ni.protocol) hlist_for_each_entry() 364 if (host->h_version != ni.version) hlist_for_each_entry() 366 if (!rpc_cmp_addr(nlm_srcaddr(host), src_sap)) hlist_for_each_entry() 370 hlist_del(&host->h_hash); hlist_for_each_entry() 371 hlist_add_head(&host->h_hash, chain); hlist_for_each_entry() 373 nlm_get_host(host); hlist_for_each_entry() 374 dprintk("lockd: %s found host %s (%s)\n", hlist_for_each_entry() 375 __func__, host->h_name, host->h_addrbuf); hlist_for_each_entry() 379 host = nlm_alloc_host(&ni, nsm); 380 if (unlikely(host == NULL)) 383 memcpy(nlm_srcaddr(host), src_sap, src_len); 384 host->h_srcaddrlen = src_len; 385 hlist_add_head(&host->h_hash, chain); 389 dprintk("lockd: %s created host %s (%s)\n", 390 __func__, host->h_name, host->h_addrbuf); 394 return host; 399 * @host: nlm_host to release 403 void nlmsvc_release_host(struct nlm_host *host) nlmsvc_release_host() argument 405 if (host == NULL) nlmsvc_release_host() 408 dprintk("lockd: release server host %s\n", host->h_name); nlmsvc_release_host() 410 WARN_ON_ONCE(!host->h_server); nlmsvc_release_host() 411 atomic_dec(&host->h_count); nlmsvc_release_host() 418 nlm_bind_host(struct nlm_host *host) nlm_bind_host() argument 423 host->h_name, host->h_addrbuf); nlm_bind_host() 425 /* Lock host handle */ nlm_bind_host() 426 mutex_lock(&host->h_mutex); nlm_bind_host() 431 if ((clnt = host->h_rpcclnt) != NULL) { nlm_bind_host() 432 if (time_after_eq(jiffies, host->h_nextrebind)) { nlm_bind_host() 434 host->h_nextrebind = jiffies + NLM_HOST_REBIND; nlm_bind_host() 436 host->h_nextrebind - jiffies); nlm_bind_host() 447 .net = host->net, nlm_bind_host() 448 .protocol = host->h_proto, nlm_bind_host() 449 .address = nlm_addr(host), nlm_bind_host() 450 .addrsize = host->h_addrlen, nlm_bind_host() 452 .servername = host->h_name, nlm_bind_host() 454 .version = host->h_version, nlm_bind_host() 465 if (!host->h_server) nlm_bind_host() 467 if (host->h_noresvport) nlm_bind_host() 469 if (host->h_srcaddrlen) nlm_bind_host() 470 args.saddress = nlm_srcaddr(host); nlm_bind_host() 474 host->h_rpcclnt = clnt; nlm_bind_host() 476 printk("lockd: couldn't create RPC handle for %s\n", host->h_name); nlm_bind_host() 481 mutex_unlock(&host->h_mutex); nlm_bind_host() 489 nlm_rebind_host(struct nlm_host *host) nlm_rebind_host() argument 491 dprintk("lockd: rebind host %s\n", 
host->h_name); nlm_rebind_host() 492 if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) { nlm_rebind_host() 493 rpc_force_rebind(host->h_rpcclnt); nlm_rebind_host() 494 host->h_nextrebind = jiffies + NLM_HOST_REBIND; nlm_rebind_host() 499 * Increment NLM host count 501 struct nlm_host * nlm_get_host(struct nlm_host *host) nlm_get_host() argument 503 if (host) { nlm_get_host() 504 dprintk("lockd: get host %s\n", host->h_name); nlm_get_host() 505 atomic_inc(&host->h_count); nlm_get_host() 506 host->h_expires = jiffies + NLM_HOST_EXPIRE; nlm_get_host() 508 return host; nlm_get_host() 515 struct nlm_host *host; next_host_state() local 519 for_each_host(host, chain, cache) { for_each_host() 520 if (host->h_nsmhandle == nsm for_each_host() 521 && host->h_nsmstate != info->state) { for_each_host() 522 host->h_nsmstate = info->state; for_each_host() 523 host->h_state++; for_each_host() 525 nlm_get_host(host); for_each_host() 527 return host; for_each_host() 536 * nlm_host_rebooted - Release all resources held by rebooted host 539 * We were notified that the specified host has rebooted. Release 545 struct nlm_host *host; nlm_host_rebooted() local 552 * We run the loop repeatedly, because we drop the host table nlm_host_rebooted() 554 * To avoid processing a host several times, we match the nsmstate. nlm_host_rebooted() 556 while ((host = next_host_state(nlm_server_hosts, nsm, info)) != NULL) { nlm_host_rebooted() 557 nlmsvc_free_host_resources(host); nlm_host_rebooted() 558 nlmsvc_release_host(host); nlm_host_rebooted() 560 while ((host = next_host_state(nlm_client_hosts, nsm, info)) != NULL) { nlm_host_rebooted() 561 nlmclnt_recovery(host); nlm_host_rebooted() 562 nlmclnt_release_host(host); nlm_host_rebooted() 571 struct nlm_host *host; nlm_complain_hosts() local 578 printk(KERN_WARNING "lockd: couldn't shutdown host module for net %p!\n", net); nlm_complain_hosts() 583 printk(KERN_WARNING "lockd: couldn't shutdown host module!\n"); nlm_complain_hosts() 587 for_each_host(host, chain, nlm_server_hosts) { for_each_host() 588 if (net && host->net != net) for_each_host() 591 host->h_name, atomic_read(&host->h_count), for_each_host() 592 host->h_inuse, host->h_expires, host->net); for_each_host() 600 struct nlm_host *host; nlm_shutdown_hosts_net() local 606 for_each_host(host, chain, nlm_server_hosts) { for_each_host() 607 if (net && host->net != net) for_each_host() 609 host->h_expires = jiffies - 1; for_each_host() 610 if (host->h_rpcclnt) { for_each_host() 611 rpc_shutdown_client(host->h_rpcclnt); for_each_host() 612 host->h_rpcclnt = NULL; for_each_host() 630 dprintk("lockd: shutting down host module\n"); nlm_shutdown_hosts() 644 struct nlm_host *host; nlm_gc_hosts() local 646 dprintk("lockd: host garbage collection for net %p\n", net); for_each_host() 647 for_each_host(host, chain, nlm_server_hosts) { for_each_host() 648 if (net && host->net != net) for_each_host() 650 host->h_inuse = 0; for_each_host() 656 for_each_host_safe(host, next, chain, nlm_server_hosts) { for_each_host_safe() 657 if (net && host->net != net) for_each_host_safe() 659 if (atomic_read(&host->h_count) || host->h_inuse for_each_host_safe() 660 || time_before(jiffies, host->h_expires)) { for_each_host_safe() 663 host->h_name, atomic_read(&host->h_count), for_each_host_safe() 664 host->h_inuse, host->h_expires, host->net); for_each_host_safe() 667 nlm_destroy_host_locked(host); for_each_host_safe()
|
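nlmclnt_lookup_host() and nlmsvc_lookup_host() above implement a classic lookup-or-create over a hash table of refcounted entries: walk the chain, compare address/protocol/version, take a reference on a hit, otherwise allocate a new entry and add it to the chain. A stripped-down user-space sketch of that pattern, with a single string key and a plain int refcount; everything here is illustrative, not lockd's actual code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NRHASH 8

struct nlm_host {
        struct nlm_host *next;       /* hash-chain link                */
        char             name[32];   /* lookup key (stand-in for addr) */
        int              refcount;
};

static struct nlm_host *hosts[NRHASH];

static unsigned int hash_name(const char *name)
{
        unsigned int h = 0;

        while (*name)
                h = h * 31 + (unsigned char)*name++;
        return h % NRHASH;
}

/* Find a cached host or create one, returning it with one extra reference. */
static struct nlm_host *lookup_host(const char *name)
{
        unsigned int chain = hash_name(name);
        struct nlm_host *host;

        for (host = hosts[chain]; host; host = host->next)
                if (strcmp(host->name, name) == 0) {
                        host->refcount++;            /* like nlm_get_host()   */
                        return host;
                }

        host = calloc(1, sizeof(*host));             /* like nlm_alloc_host() */
        if (!host)
                return NULL;
        snprintf(host->name, sizeof(host->name), "%s", name);
        host->refcount = 1;
        host->next = hosts[chain];                   /* hlist_add_head()      */
        hosts[chain] = host;
        return host;
}

int main(void)
{
        struct nlm_host *a = lookup_host("server1");
        struct nlm_host *b = lookup_host("server1");

        printf("same entry: %s, refcount %d\n", a == b ? "yes" : "no", b->refcount);
        return 0;
}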
H A D | clntlock.c | 55 struct nlm_host *host; nlmclnt_init() local 63 host = nlmclnt_lookup_host(nlm_init->address, nlm_init->addrlen, nlmclnt_init() 67 if (host == NULL) nlmclnt_init() 69 if (host->h_rpcclnt == NULL && nlm_bind_host(host) == NULL) nlmclnt_init() 72 return host; nlmclnt_init() 74 nlmclnt_release_host(host); nlmclnt_init() 83 * @host: nlm_host structure reserved by nlmclnt_init() 86 void nlmclnt_done(struct nlm_host *host) nlmclnt_done() argument 88 struct net *net = host->net; nlmclnt_done() 90 nlmclnt_release_host(host); nlmclnt_done() 98 struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl) nlmclnt_prepare_block() argument 104 block->b_host = host; nlmclnt_prepare_block() 208 * Reclaim all locks on server host. We do this by spawning a separate 212 nlmclnt_recovery(struct nlm_host *host) nlmclnt_recovery() argument 216 if (!host->h_reclaiming++) { nlmclnt_recovery() 217 nlm_get_host(host); nlmclnt_recovery() 218 task = kthread_run(reclaimer, host, "%s-reclaim", host->h_name); nlmclnt_recovery() 222 "(%ld)\n", host->h_name, PTR_ERR(task)); nlmclnt_recovery() 229 struct nlm_host *host = (struct nlm_host *) ptr; reclaimer() local 234 struct net *net = host->net; reclaimer() 240 host->h_name); reclaimer() 246 down_write(&host->h_rwsem); reclaimer() 249 dprintk("lockd: reclaiming locks for host %s\n", host->h_name); reclaimer() 252 nsmstate = host->h_nsmstate; reclaimer() 257 host->h_nextrebind = jiffies; reclaimer() 258 nlm_rebind_host(host); reclaimer() 261 list_splice_init(&host->h_granted, &host->h_reclaim); reclaimer() 262 list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) { reclaimer() 269 * reclaimer thread is spawned for this host. reclaimer() 273 if (nlmclnt_reclaim(host, fl, req) != 0) reclaimer() 275 list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted); reclaimer() 276 if (host->h_nsmstate != nsmstate) { reclaimer() 282 host->h_reclaiming = 0; reclaimer() 283 up_write(&host->h_rwsem); reclaimer() 284 dprintk("NLM: done reclaiming locks for host %s\n", host->h_name); reclaimer() 289 if (block->b_host == host) { reclaimer() 296 /* Release host handle after use */ reclaimer() 297 nlmclnt_release_host(host); reclaimer()
|
H A D | Makefile | 7 lockd-objs-y := clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \
|
H A D | clntproc.c | 30 static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host); 57 if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock)) nlm_put_lockowner() 60 spin_unlock(&lockowner->host->h_lock); nlm_put_lockowner() 61 nlmclnt_release_host(lockowner->host); nlm_put_lockowner() 65 static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid) nlm_pidbusy() argument 68 list_for_each_entry(lockowner, &host->h_lockowners, list) { nlm_pidbusy() 75 static inline uint32_t __nlm_alloc_pid(struct nlm_host *host) __nlm_alloc_pid() argument 79 res = host->h_pidcount++; __nlm_alloc_pid() 80 } while (nlm_pidbusy(host, res) < 0); __nlm_alloc_pid() 84 static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner) __nlm_find_lockowner() argument 87 list_for_each_entry(lockowner, &host->h_lockowners, list) { __nlm_find_lockowner() 95 static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner) nlm_find_lockowner() argument 99 spin_lock(&host->h_lock); nlm_find_lockowner() 100 res = __nlm_find_lockowner(host, owner); nlm_find_lockowner() 102 spin_unlock(&host->h_lock); nlm_find_lockowner() 104 spin_lock(&host->h_lock); nlm_find_lockowner() 105 res = __nlm_find_lockowner(host, owner); nlm_find_lockowner() 110 new->pid = __nlm_alloc_pid(host); nlm_find_lockowner() 111 new->host = nlm_get_host(host); nlm_find_lockowner() 112 list_add(&new->list, &host->h_lockowners); nlm_find_lockowner() 116 spin_unlock(&host->h_lock); nlm_find_lockowner() 150 * @host: address of a valid nlm_host context representing the NLM server 155 int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl) nlmclnt_proc() argument 160 call = nlm_alloc_call(host); nlmclnt_proc() 164 nlmclnt_locks_init_private(fl, host); nlmclnt_proc() 194 struct nlm_rqst *nlm_alloc_call(struct nlm_host *host) nlm_alloc_call() argument 204 call->a_host = nlm_get_host(host); nlm_alloc_call() 251 struct nlm_host *host = req->a_host; nlmclnt_call() local 263 (int)proc, host->h_name); nlmclnt_call() 266 if (host->h_reclaiming && !argp->reclaim) nlmclnt_call() 270 if ((clnt = nlm_bind_host(host)) == NULL) nlmclnt_call() 284 nlm_rebind_host(host); nlmclnt_call() 304 wake_up_all(&host->h_gracewait); nlmclnt_call() 318 status = nlm_wait_on_grace(&host->h_gracewait); nlmclnt_call() 329 struct nlm_host *host = req->a_host; __nlm_async_call() local 339 (int)proc, host->h_name); __nlm_async_call() 342 clnt = nlm_bind_host(host); __nlm_async_call() 447 spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock); nlmclnt_locks_copy_lock() 450 list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted); nlmclnt_locks_copy_lock() 451 spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock); nlmclnt_locks_copy_lock() 456 spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock); nlmclnt_locks_release_private() 458 spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock); nlmclnt_locks_release_private() 467 static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host) nlmclnt_locks_init_private() argument 470 fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner); nlmclnt_locks_init_private() 515 struct nlm_host *host = req->a_host; nlmclnt_lock() local 522 if (nsm_monitor(host) < 0) nlmclnt_lock() 532 block = nlmclnt_prepare_block(host, fl); nlmclnt_lock() 541 fl->fl_u.nfs_fl.state = host->h_state; nlmclnt_lock() 564 if (nlmclnt_cancel(host, req->a_args.block, fl) == 0) nlmclnt_lock() 569 down_read(&host->h_rwsem); nlmclnt_lock() 571 if 
(fl->fl_u.nfs_fl.state != host->h_state) { nlmclnt_lock() 572 up_read(&host->h_rwsem); nlmclnt_lock() 579 up_read(&host->h_rwsem); nlmclnt_lock() 606 down_read(&host->h_rwsem); nlmclnt_lock() 608 up_read(&host->h_rwsem); nlmclnt_lock() 619 nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl, nlmclnt_reclaim() argument 627 req->a_host = host; nlmclnt_reclaim() 662 struct nlm_host *host = req->a_host; nlmclnt_unlock() local 673 down_read(&host->h_rwsem); nlmclnt_unlock() 675 up_read(&host->h_rwsem); nlmclnt_unlock() 743 static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl) nlmclnt_cancel() argument 751 req = nlm_alloc_call(host); nlmclnt_cancel()
|
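nlm_find_lockowner() above shows the drop-the-lock-to-allocate idiom: search under the lock, release it to allocate (a sleeping allocation is not allowed under a spinlock), then retake the lock and search again before inserting, in case another thread won the race. A user-space model of that sequence using a pthread mutex; the structure and names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct lockowner {
        struct lockowner *next;
        long              owner;     /* lookup key */
        int               pid;
};

static struct lockowner *owners;
static pthread_mutex_t   owners_lock = PTHREAD_MUTEX_INITIALIZER;
static int               next_pid = 1;

static struct lockowner *find_locked(long owner)
{
        for (struct lockowner *lo = owners; lo; lo = lo->next)
                if (lo->owner == owner)
                        return lo;
        return NULL;
}

static struct lockowner *find_or_create(long owner)
{
        struct lockowner *res, *new;

        pthread_mutex_lock(&owners_lock);
        res = find_locked(owner);
        pthread_mutex_unlock(&owners_lock);          /* drop lock to allocate  */

        new = res ? NULL : malloc(sizeof(*new));

        pthread_mutex_lock(&owners_lock);
        if (!res) {
                res = find_locked(owner);            /* re-check after racing  */
                if (!res && new) {
                        new->owner = owner;
                        new->pid   = next_pid++;
                        new->next  = owners;
                        owners     = new;
                        res = new;
                        new = NULL;
                }
        }
        pthread_mutex_unlock(&owners_lock);

        free(new);                                   /* lost the race: discard */
        return res;
}

int main(void)
{
        struct lockowner *lo = find_or_create(42);

        printf("owner 42 -> pid %d\n", lo ? lo->pid : -1);
        return 0;
}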
H A D | svcshare.c | 27 nlmsvc_share_file(struct nlm_host *host, struct nlm_file *file, nlmsvc_share_file() argument 35 if (share->s_host == host && nlm_cmp_owner(share, oh)) nlmsvc_share_file() 52 share->s_host = host; nlmsvc_share_file() 68 nlmsvc_unshare_file(struct nlm_host *host, struct nlm_file *file, nlmsvc_unshare_file() argument 76 if (share->s_host == host && nlm_cmp_owner(share, oh)) { nlmsvc_unshare_file() 90 * those owned by the given (type of) host 92 void nlmsvc_traverse_shares(struct nlm_host *host, struct nlm_file *file, nlmsvc_traverse_shares() argument 99 if (match(share->s_host, host)) { nlmsvc_traverse_shares()
|
H A D | svc4proc.c | 25 struct nlm_host *host = NULL; nlm4svc_retrieve_args() local 34 /* Obtain host handle */ nlm4svc_retrieve_args() 35 if (!(host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len)) nlm4svc_retrieve_args() 36 || (argp->monitor && nsm_monitor(host) < 0)) nlm4svc_retrieve_args() 38 *hostp = host; nlm4svc_retrieve_args() 48 lock->fl.fl_owner = (fl_owner_t) host; nlm4svc_retrieve_args() 55 nlmsvc_release_host(host); nlm4svc_retrieve_args() 78 struct nlm_host *host; nlm4svc_proc_test() local 86 if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) nlm4svc_proc_test() 90 resp->status = nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie); nlm4svc_proc_test() 96 nlmsvc_release_host(host); nlm4svc_proc_test() 105 struct nlm_host *host; nlm4svc_proc_lock() local 114 if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) nlm4svc_proc_lock() 122 * NB: We don't retrieve the remote host's state yet. nlm4svc_proc_lock() 124 if (host->h_nsmstate && host->h_nsmstate != argp->state) { nlm4svc_proc_lock() 130 resp->status = nlmsvc_lock(rqstp, file, host, &argp->lock, nlm4svc_proc_lock() 138 nlmsvc_release_host(host); nlm4svc_proc_lock() 147 struct nlm_host *host; nlm4svc_proc_cancel() local 161 if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) nlm4svc_proc_cancel() 168 nlmsvc_release_host(host); nlm4svc_proc_cancel() 180 struct nlm_host *host; nlm4svc_proc_unlock() local 194 if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) nlm4svc_proc_unlock() 201 nlmsvc_release_host(host); nlm4svc_proc_unlock() 249 struct nlm_host *host; nlm4svc_callback() local 253 host = nlmsvc_lookup_host(rqstp, nlm4svc_callback() 256 if (host == NULL) nlm4svc_callback() 259 call = nlm_alloc_call(host); nlm4svc_callback() 260 nlmsvc_release_host(host); nlm4svc_callback() 318 struct nlm_host *host; nlm4svc_proc_share() local 332 if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) nlm4svc_proc_share() 336 resp->status = nlmsvc_share_file(host, file, argp); nlm4svc_proc_share() 339 nlmsvc_release_host(host); nlm4svc_proc_share() 351 struct nlm_host *host; nlm4svc_proc_unshare() local 365 if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) nlm4svc_proc_unshare() 369 resp->status = nlmsvc_unshare_file(host, file, argp); nlm4svc_proc_unshare() 372 nlmsvc_release_host(host); nlm4svc_proc_unshare() 397 struct nlm_host *host; nlm4svc_proc_free_all() local 400 if (nlm4svc_retrieve_args(rqstp, argp, &host, NULL)) nlm4svc_proc_free_all() 403 nlmsvc_free_host_resources(host); nlm4svc_proc_free_all() 404 nlmsvc_release_host(host); nlm4svc_proc_free_all()
|
H A D | svcproc.c | 54 struct nlm_host *host = NULL; nlmsvc_retrieve_args() local 63 /* Obtain host handle */ nlmsvc_retrieve_args() 64 if (!(host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len)) nlmsvc_retrieve_args() 65 || (argp->monitor && nsm_monitor(host) < 0)) nlmsvc_retrieve_args() 67 *hostp = host; nlmsvc_retrieve_args() 78 lock->fl.fl_owner = (fl_owner_t) host; nlmsvc_retrieve_args() 85 nlmsvc_release_host(host); nlmsvc_retrieve_args() 108 struct nlm_host *host; nlmsvc_proc_test() local 116 if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) nlmsvc_proc_test() 120 resp->status = cast_status(nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie)); nlmsvc_proc_test() 127 nlmsvc_release_host(host); nlmsvc_proc_test() 136 struct nlm_host *host; nlmsvc_proc_lock() local 145 if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) nlmsvc_proc_lock() 153 * NB: We don't retrieve the remote host's state yet. nlmsvc_proc_lock() 155 if (host->h_nsmstate && host->h_nsmstate != argp->state) { nlmsvc_proc_lock() 161 resp->status = cast_status(nlmsvc_lock(rqstp, file, host, &argp->lock, nlmsvc_proc_lock() 169 nlmsvc_release_host(host); nlmsvc_proc_lock() 178 struct nlm_host *host; nlmsvc_proc_cancel() local 193 if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) nlmsvc_proc_cancel() 200 nlmsvc_release_host(host); nlmsvc_proc_cancel() 212 struct nlm_host *host; nlmsvc_proc_unlock() local 227 if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) nlmsvc_proc_unlock() 234 nlmsvc_release_host(host); nlmsvc_proc_unlock() 290 struct nlm_host *host; nlmsvc_callback() local 294 host = nlmsvc_lookup_host(rqstp, nlmsvc_callback() 297 if (host == NULL) nlmsvc_callback() 300 call = nlm_alloc_call(host); nlmsvc_callback() 301 nlmsvc_release_host(host); nlmsvc_callback() 361 struct nlm_host *host; nlmsvc_proc_share() local 375 if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) nlmsvc_proc_share() 379 resp->status = cast_status(nlmsvc_share_file(host, file, argp)); nlmsvc_proc_share() 382 nlmsvc_release_host(host); nlmsvc_proc_share() 394 struct nlm_host *host; nlmsvc_proc_unshare() local 408 if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) nlmsvc_proc_unshare() 412 resp->status = cast_status(nlmsvc_unshare_file(host, file, argp)); nlmsvc_proc_unshare() 415 nlmsvc_release_host(host); nlmsvc_proc_unshare() 440 struct nlm_host *host; nlmsvc_proc_free_all() local 443 if (nlmsvc_retrieve_args(rqstp, argp, &host, NULL)) nlmsvc_proc_free_all() 446 nlmsvc_free_host_resources(host); nlmsvc_proc_free_all() 447 nlmsvc_release_host(host); nlmsvc_proc_free_all()
|
H A D | svcsubs.c | 162 nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file, nlm_traverse_locks() argument 183 if (match(lockhost, host)) { nlm_traverse_locks() 213 nlm_inspect_file(struct nlm_host *host, struct nlm_file *file, nlm_host_match_fn_t match) nlm_inspect_file() argument 215 nlmsvc_traverse_blocks(host, file, match); nlm_inspect_file() 216 nlmsvc_traverse_shares(host, file, match); nlm_inspect_file() 217 return nlm_traverse_locks(host, file, match); nlm_inspect_file() 322 * all resources bound to a specific host. 325 * returns 1 iff the host is a client. 332 struct nlm_host *host = data; nlmsvc_mark_host() local 335 (host->net == hint->net)) nlmsvc_mark_host() 336 host->h_inuse = 1; nlmsvc_mark_host() 343 struct nlm_host *host = data; nlmsvc_same_host() local 345 return host == other; nlmsvc_same_host() 351 struct nlm_host *host = data; nlmsvc_is_client() local 353 if (host->h_server) { nlmsvc_is_client() 358 if (host->h_nsmhandle) nlmsvc_is_client() 359 host->h_nsmhandle->sm_sticky = 1; nlmsvc_is_client() 382 nlmsvc_free_host_resources(struct nlm_host *host) nlmsvc_free_host_resources() argument 386 if (nlm_traverse_files(host, nlmsvc_same_host, NULL)) { nlmsvc_free_host_resources() 389 host->h_name); nlmsvc_free_host_resources() 437 nlmsvc_match_ip(void *datap, struct nlm_host *host) nlmsvc_match_ip() argument 439 return rpc_cmp_addr(nlm_srcaddr(host), datap); nlmsvc_match_ip() 446 * Release all locks held by clients accessing this host
|
/linux-4.1.27/drivers/gpu/host1x/ |
H A D | dev.h | 39 int (*init)(struct host1x_channel *channel, struct host1x *host, 62 void (*show_channel_cdma)(struct host1x *host, 65 void (*show_channel_fifo)(struct host1x *host, 68 void (*show_mlocks)(struct host1x *host, struct output *output); 82 int (*init_host_sync)(struct host1x *host, u32 cpm, 85 struct host1x *host, u32 id, u32 thresh); 86 void (*enable_syncpt_intr)(struct host1x *host, u32 id); 87 void (*disable_syncpt_intr)(struct host1x *host, u32 id); 88 void (*disable_all_syncpt_intrs)(struct host1x *host); 89 int (*free_syncpt_irq)(struct host1x *host); 141 static inline void host1x_hw_syncpt_restore(struct host1x *host, host1x_hw_syncpt_restore() argument 144 host->syncpt_op->restore(sp); host1x_hw_syncpt_restore() 147 static inline void host1x_hw_syncpt_restore_wait_base(struct host1x *host, host1x_hw_syncpt_restore_wait_base() argument 150 host->syncpt_op->restore_wait_base(sp); host1x_hw_syncpt_restore_wait_base() 153 static inline void host1x_hw_syncpt_load_wait_base(struct host1x *host, host1x_hw_syncpt_load_wait_base() argument 156 host->syncpt_op->load_wait_base(sp); host1x_hw_syncpt_load_wait_base() 159 static inline u32 host1x_hw_syncpt_load(struct host1x *host, host1x_hw_syncpt_load() argument 162 return host->syncpt_op->load(sp); host1x_hw_syncpt_load() 165 static inline int host1x_hw_syncpt_cpu_incr(struct host1x *host, host1x_hw_syncpt_cpu_incr() argument 168 return host->syncpt_op->cpu_incr(sp); host1x_hw_syncpt_cpu_incr() 171 static inline int host1x_hw_syncpt_patch_wait(struct host1x *host, host1x_hw_syncpt_patch_wait() argument 175 return host->syncpt_op->patch_wait(sp, patch_addr); host1x_hw_syncpt_patch_wait() 178 static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm, host1x_hw_intr_init_host_sync() argument 181 return host->intr_op->init_host_sync(host, cpm, syncpt_thresh_work); host1x_hw_intr_init_host_sync() 184 static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host, host1x_hw_intr_set_syncpt_threshold() argument 187 host->intr_op->set_syncpt_threshold(host, id, thresh); host1x_hw_intr_set_syncpt_threshold() 190 static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host, host1x_hw_intr_enable_syncpt_intr() argument 193 host->intr_op->enable_syncpt_intr(host, id); host1x_hw_intr_enable_syncpt_intr() 196 static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host, host1x_hw_intr_disable_syncpt_intr() argument 199 host->intr_op->disable_syncpt_intr(host, id); host1x_hw_intr_disable_syncpt_intr() 202 static inline void host1x_hw_intr_disable_all_syncpt_intrs(struct host1x *host) host1x_hw_intr_disable_all_syncpt_intrs() argument 204 host->intr_op->disable_all_syncpt_intrs(host); host1x_hw_intr_disable_all_syncpt_intrs() 207 static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host) host1x_hw_intr_free_syncpt_irq() argument 209 return host->intr_op->free_syncpt_irq(host); host1x_hw_intr_free_syncpt_irq() 212 static inline int host1x_hw_channel_init(struct host1x *host, host1x_hw_channel_init() argument 216 return host->channel_op->init(channel, host, chid); host1x_hw_channel_init() 219 static inline int host1x_hw_channel_submit(struct host1x *host, host1x_hw_channel_submit() argument 222 return host->channel_op->submit(job); host1x_hw_channel_submit() 225 static inline void host1x_hw_cdma_start(struct host1x *host, host1x_hw_cdma_start() argument 228 host->cdma_op->start(cdma); host1x_hw_cdma_start() 231 static inline void host1x_hw_cdma_stop(struct host1x *host, 
host1x_hw_cdma_stop() argument 234 host->cdma_op->stop(cdma); host1x_hw_cdma_stop() 237 static inline void host1x_hw_cdma_flush(struct host1x *host, host1x_hw_cdma_flush() argument 240 host->cdma_op->flush(cdma); host1x_hw_cdma_flush() 243 static inline int host1x_hw_cdma_timeout_init(struct host1x *host, host1x_hw_cdma_timeout_init() argument 247 return host->cdma_op->timeout_init(cdma, syncpt_id); host1x_hw_cdma_timeout_init() 250 static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host, host1x_hw_cdma_timeout_destroy() argument 253 host->cdma_op->timeout_destroy(cdma); host1x_hw_cdma_timeout_destroy() 256 static inline void host1x_hw_cdma_freeze(struct host1x *host, host1x_hw_cdma_freeze() argument 259 host->cdma_op->freeze(cdma); host1x_hw_cdma_freeze() 262 static inline void host1x_hw_cdma_resume(struct host1x *host, host1x_hw_cdma_resume() argument 265 host->cdma_op->resume(cdma, getptr); host1x_hw_cdma_resume() 268 static inline void host1x_hw_cdma_timeout_cpu_incr(struct host1x *host, host1x_hw_cdma_timeout_cpu_incr() argument 274 host->cdma_op->timeout_cpu_incr(cdma, getptr, syncpt_incrs, syncval, host1x_hw_cdma_timeout_cpu_incr() 278 static inline void host1x_hw_pushbuffer_init(struct host1x *host, host1x_hw_pushbuffer_init() argument 281 host->cdma_pb_op->init(pb); host1x_hw_pushbuffer_init() 284 static inline void host1x_hw_debug_init(struct host1x *host, struct dentry *de) host1x_hw_debug_init() argument 286 if (host->debug_op && host->debug_op->debug_init) host1x_hw_debug_init() 287 host->debug_op->debug_init(de); host1x_hw_debug_init() 290 static inline void host1x_hw_show_channel_cdma(struct host1x *host, host1x_hw_show_channel_cdma() argument 294 host->debug_op->show_channel_cdma(host, channel, o); host1x_hw_show_channel_cdma() 297 static inline void host1x_hw_show_channel_fifo(struct host1x *host, host1x_hw_show_channel_fifo() argument 301 host->debug_op->show_channel_fifo(host, channel, o); host1x_hw_show_channel_fifo() 304 static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o) host1x_hw_show_mlocks() argument 306 host->debug_op->show_mlocks(host, o); host1x_hw_show_mlocks()
|
H A D | dev.c | 102 struct host1x *host; host1x_probe() local 123 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); host1x_probe() 124 if (!host) host1x_probe() 127 mutex_init(&host->devices_lock); host1x_probe() 128 INIT_LIST_HEAD(&host->devices); host1x_probe() 129 INIT_LIST_HEAD(&host->list); host1x_probe() 130 host->dev = &pdev->dev; host1x_probe() 131 host->info = id->data; host1x_probe() 134 platform_set_drvdata(pdev, host); host1x_probe() 136 host->regs = devm_ioremap_resource(&pdev->dev, regs); host1x_probe() 137 if (IS_ERR(host->regs)) host1x_probe() 138 return PTR_ERR(host->regs); host1x_probe() 140 if (host->info->init) { host1x_probe() 141 err = host->info->init(host); host1x_probe() 146 host->clk = devm_clk_get(&pdev->dev, NULL); host1x_probe() 147 if (IS_ERR(host->clk)) { host1x_probe() 149 err = PTR_ERR(host->clk); host1x_probe() 153 err = host1x_channel_list_init(host); host1x_probe() 159 err = clk_prepare_enable(host->clk); host1x_probe() 165 err = host1x_syncpt_init(host); host1x_probe() 171 err = host1x_intr_init(host, syncpt_irq); host1x_probe() 177 host1x_debug_init(host); host1x_probe() 179 err = host1x_register(host); host1x_probe() 186 host1x_intr_deinit(host); host1x_probe() 188 host1x_syncpt_deinit(host); host1x_probe() 190 clk_disable_unprepare(host->clk); host1x_probe() 196 struct host1x *host = platform_get_drvdata(pdev); host1x_remove() local 198 host1x_unregister(host); host1x_remove() 199 host1x_intr_deinit(host); host1x_remove() 200 host1x_syncpt_deinit(host); host1x_remove() 201 clk_disable_unprepare(host->clk); host1x_remove()
|
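host1x_probe() above pairs devm_* managed allocations with an explicit goto-based unwind for the resources that are not managed (the enabled clock, syncpt and intr state). A small stand-alone illustration of that goto-unwind style; the "resources" below are faked and the names are made up:

#include <stdio.h>

static int  acquire_a(void) { puts("A acquired"); return 0; }
static void release_a(void) { puts("A released"); }
static int  acquire_b(void) { puts("B acquired"); return 0; }
static void release_b(void) { puts("B released"); }
static int  acquire_c(void) { puts("C failed");   return -1; }

/* Acquire in order; on failure unwind only what was already acquired. */
static int probe(void)
{
        int err;

        err = acquire_a();
        if (err)
                return err;

        err = acquire_b();
        if (err)
                goto fail_a;

        err = acquire_c();
        if (err)
                goto fail_b;

        return 0;

fail_b:
        release_b();
fail_a:
        release_a();
        return err;
}

int main(void)
{
        printf("probe() = %d\n", probe());
        return 0;
}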
H A D | syncpt.c | 34 host1x_syncpt_base_request(struct host1x *host) host1x_syncpt_base_request() argument 36 struct host1x_syncpt_base *bases = host->bases; host1x_syncpt_base_request() 39 for (i = 0; i < host->info->nb_bases; i++) host1x_syncpt_base_request() 43 if (i >= host->info->nb_bases) host1x_syncpt_base_request() 56 static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host, host1x_syncpt_alloc() argument 61 struct host1x_syncpt *sp = host->syncpt; host1x_syncpt_alloc() 64 for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++) host1x_syncpt_alloc() 67 if (i >= host->info->nb_pts) host1x_syncpt_alloc() 71 sp->base = host1x_syncpt_base_request(host); host1x_syncpt_alloc() 110 void host1x_syncpt_restore(struct host1x *host) host1x_syncpt_restore() argument 112 struct host1x_syncpt *sp_base = host->syncpt; host1x_syncpt_restore() 115 for (i = 0; i < host1x_syncpt_nb_pts(host); i++) host1x_syncpt_restore() 116 host1x_hw_syncpt_restore(host, sp_base + i); host1x_syncpt_restore() 117 for (i = 0; i < host1x_syncpt_nb_bases(host); i++) host1x_syncpt_restore() 118 host1x_hw_syncpt_restore_wait_base(host, sp_base + i); host1x_syncpt_restore() 126 void host1x_syncpt_save(struct host1x *host) host1x_syncpt_save() argument 128 struct host1x_syncpt *sp_base = host->syncpt; host1x_syncpt_save() 131 for (i = 0; i < host1x_syncpt_nb_pts(host); i++) { host1x_syncpt_save() 133 host1x_hw_syncpt_load(host, sp_base + i); host1x_syncpt_save() 138 for (i = 0; i < host1x_syncpt_nb_bases(host); i++) host1x_syncpt_save() 139 host1x_hw_syncpt_load_wait_base(host, sp_base + i); host1x_syncpt_save() 149 val = host1x_hw_syncpt_load(sp->host, sp); host1x_syncpt_load() 161 host1x_hw_syncpt_load_wait_base(sp->host, sp); host1x_syncpt_load_wait_base() 171 return host1x_hw_syncpt_cpu_incr(sp->host, sp); host1x_syncpt_incr() 181 host1x_hw_syncpt_load(sp->host, sp); syncpt_load_min_is_expired() 208 val = host1x_hw_syncpt_load(sp->host, sp); host1x_syncpt_wait() 228 err = host1x_intr_add_action(sp->host, sp->id, thresh, host1x_syncpt_wait() 257 dev_warn(sp->host->dev, host1x_syncpt_wait() 262 host1x_debug_dump_syncpts(sp->host); host1x_syncpt_wait() 264 host1x_debug_dump(sp->host); host1x_syncpt_wait() 268 host1x_intr_put_ref(sp->host, sp->id, ref); host1x_syncpt_wait() 337 return host1x_hw_syncpt_patch_wait(sp->host, sp, patch_addr); host1x_syncpt_patch_wait() 340 int host1x_syncpt_init(struct host1x *host) host1x_syncpt_init() argument 346 syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts, host1x_syncpt_init() 351 bases = devm_kzalloc(host->dev, sizeof(*bases) * host->info->nb_bases, host1x_syncpt_init() 356 for (i = 0; i < host->info->nb_pts; i++) { host1x_syncpt_init() 358 syncpt[i].host = host; host1x_syncpt_init() 361 for (i = 0; i < host->info->nb_bases; i++) host1x_syncpt_init() 364 host->syncpt = syncpt; host1x_syncpt_init() 365 host->bases = bases; host1x_syncpt_init() 367 host1x_syncpt_restore(host); host1x_syncpt_init() 370 host->nop_sp = host1x_syncpt_alloc(host, NULL, 0); host1x_syncpt_init() 371 if (!host->nop_sp) host1x_syncpt_init() 380 struct host1x *host = dev_get_drvdata(dev->parent); host1x_syncpt_request() local 381 return host1x_syncpt_alloc(host, dev, flags); host1x_syncpt_request() 399 void host1x_syncpt_deinit(struct host1x *host) host1x_syncpt_deinit() argument 402 struct host1x_syncpt *sp = host->syncpt; host1x_syncpt_deinit() 403 for (i = 0; i < host->info->nb_pts; i++, sp++) host1x_syncpt_deinit() 434 int host1x_syncpt_nb_pts(struct host1x *host) 
host1x_syncpt_nb_pts() argument 436 return host->info->nb_pts; host1x_syncpt_nb_pts() 439 int host1x_syncpt_nb_bases(struct host1x *host) host1x_syncpt_nb_bases() argument 441 return host->info->nb_bases; host1x_syncpt_nb_bases() 444 int host1x_syncpt_nb_mlocks(struct host1x *host) host1x_syncpt_nb_mlocks() argument 446 return host->info->nb_mlocks; host1x_syncpt_nb_mlocks() 449 struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id) host1x_syncpt_get() argument 451 if (host->info->nb_pts < id) host1x_syncpt_get() 453 return host->syncpt + id; host1x_syncpt_get()
|
H A D | intr.c | 99 static void reset_threshold_interrupt(struct host1x *host, reset_threshold_interrupt() argument 106 host1x_hw_intr_set_syncpt_threshold(host, id, thresh); reset_threshold_interrupt() 107 host1x_hw_intr_enable_syncpt_intr(host, id); reset_threshold_interrupt() 164 static int process_wait_list(struct host1x *host, process_wait_list() argument 182 host1x_hw_intr_disable_syncpt_intr(host, syncpt->id); process_wait_list() 184 reset_threshold_interrupt(host, &syncpt->intr.wait_head, process_wait_list() 206 struct host1x *host = syncpt->host; syncpt_thresh_work() local 208 (void)process_wait_list(host, syncpt, syncpt_thresh_work() 209 host1x_syncpt_load(host->syncpt + id)); syncpt_thresh_work() 212 int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh, host1x_intr_add_action() argument 235 syncpt = host->syncpt + id; host1x_intr_add_action() 243 host1x_hw_intr_set_syncpt_threshold(host, id, thresh); host1x_intr_add_action() 247 host1x_hw_intr_enable_syncpt_intr(host, id); host1x_intr_add_action() 257 void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref) host1x_intr_put_ref() argument 266 syncpt = host->syncpt + id; host1x_intr_put_ref() 267 (void)process_wait_list(host, syncpt, host1x_intr_put_ref() 268 host1x_syncpt_load(host->syncpt + id)); host1x_intr_put_ref() 273 int host1x_intr_init(struct host1x *host, unsigned int irq_sync) host1x_intr_init() argument 276 u32 nb_pts = host1x_syncpt_nb_pts(host); host1x_intr_init() 278 mutex_init(&host->intr_mutex); host1x_intr_init() 279 host->intr_syncpt_irq = irq_sync; host1x_intr_init() 280 host->intr_wq = create_workqueue("host_syncpt"); host1x_intr_init() 281 if (!host->intr_wq) host1x_intr_init() 285 struct host1x_syncpt *syncpt = host->syncpt + id; host1x_intr_init() 294 host1x_intr_start(host); host1x_intr_init() 299 void host1x_intr_deinit(struct host1x *host) host1x_intr_deinit() argument 301 host1x_intr_stop(host); host1x_intr_deinit() 302 destroy_workqueue(host->intr_wq); host1x_intr_deinit() 305 void host1x_intr_start(struct host1x *host) host1x_intr_start() argument 307 u32 hz = clk_get_rate(host->clk); host1x_intr_start() 310 mutex_lock(&host->intr_mutex); host1x_intr_start() 311 err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000), host1x_intr_start() 314 mutex_unlock(&host->intr_mutex); host1x_intr_start() 317 mutex_unlock(&host->intr_mutex); host1x_intr_start() 320 void host1x_intr_stop(struct host1x *host) host1x_intr_stop() argument 323 struct host1x_syncpt *syncpt = host->syncpt; host1x_intr_stop() 324 u32 nb_pts = host1x_syncpt_nb_pts(host); host1x_intr_stop() 326 mutex_lock(&host->intr_mutex); host1x_intr_stop() 328 host1x_hw_intr_disable_all_syncpt_intrs(host); host1x_intr_stop() 344 mutex_unlock(&host->intr_mutex); host1x_intr_stop() 351 host1x_hw_intr_free_syncpt_irq(host); host1x_intr_stop() 353 mutex_unlock(&host->intr_mutex); host1x_intr_stop()
|
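process_wait_list() in intr.c above is the heart of the threshold machinery: given the current sync point value, complete every queued waiter whose threshold has been reached and re-arm the interrupt for whatever remains. A simplified stand-alone model of that sweep, using a plain singly linked list and a wrap-safe comparison; the names and the callback are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct waiter {
        struct waiter *next;
        unsigned int   thresh;
        void         (*done)(unsigned int thresh);
};

static void report(unsigned int thresh)
{
        printf("waiter for threshold %u completed\n", thresh);
}

/* Fire and unlink every waiter satisfied by 'value'; return the new list head. */
static struct waiter *process_wait_list(struct waiter *head, unsigned int value)
{
        struct waiter **pp = &head;

        while (*pp) {
                struct waiter *w = *pp;

                if ((int)(value - w->thresh) >= 0) {   /* wrap-safe "reached" test */
                        *pp = w->next;
                        w->done(w->thresh);
                        free(w);
                } else {
                        pp = &w->next;
                }
        }
        return head;
}

static struct waiter *add_waiter(struct waiter *head, unsigned int thresh)
{
        struct waiter *w = malloc(sizeof(*w));

        w->thresh = thresh;
        w->done   = report;
        w->next   = head;
        return w;
}

int main(void)
{
        struct waiter *list = NULL;

        list = add_waiter(list, 10);
        list = add_waiter(list, 25);
        list = process_wait_list(list, 12);    /* completes the thresh=10 waiter */
        list = process_wait_list(list, 30);    /* completes the thresh=25 waiter */
        return list != NULL;
}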
H A D | channel.c | 27 int host1x_channel_list_init(struct host1x *host) host1x_channel_list_init() argument 29 INIT_LIST_HEAD(&host->chlist.list); host1x_channel_list_init() 30 mutex_init(&host->chlist_mutex); host1x_channel_list_init() 32 if (host->info->nb_channels > BITS_PER_LONG) { host1x_channel_list_init() 42 struct host1x *host = dev_get_drvdata(job->channel->dev->parent); host1x_job_submit() local 44 return host1x_hw_channel_submit(host, job); host1x_job_submit() 71 struct host1x *host = dev_get_drvdata(channel->dev->parent); host1x_channel_put() local 73 host1x_hw_cdma_stop(host, &channel->cdma); host1x_channel_put() 85 struct host1x *host = dev_get_drvdata(dev->parent); host1x_channel_request() local 86 int max_channels = host->info->nb_channels; host1x_channel_request() 90 mutex_lock(&host->chlist_mutex); host1x_channel_request() 92 index = find_first_zero_bit(&host->allocated_channels, max_channels); host1x_channel_request() 100 err = host1x_hw_channel_init(host, channel, index); host1x_channel_request() 108 list_add_tail(&channel->list, &host->chlist.list); host1x_channel_request() 110 host->allocated_channels |= BIT(index); host1x_channel_request() 112 mutex_unlock(&host->chlist_mutex); host1x_channel_request() 118 mutex_unlock(&host->chlist_mutex); host1x_channel_request() 125 struct host1x *host = dev_get_drvdata(channel->dev->parent); host1x_channel_free() local 127 host->allocated_channels &= ~BIT(channel->id); host1x_channel_free()
|
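host1x_channel_request() above hands out channel IDs from a single unsigned long bitmap: take the list mutex, find the first zero bit, initialise the channel, then set that bit; host1x_channel_free() clears it again. A stand-alone model of that bitmap allocator, with the locking omitted and the limits and names chosen for illustration:

#include <stdio.h>

#define MAX_CHANNELS 8

static unsigned long allocated_channels;   /* one bit per channel */

/* Return a free channel index, or -1 if all are in use. */
static int channel_request(void)
{
        for (int index = 0; index < MAX_CHANNELS; index++)
                if (!(allocated_channels & (1UL << index))) {
                        allocated_channels |= 1UL << index;
                        return index;
                }
        return -1;
}

static void channel_free(int index)
{
        allocated_channels &= ~(1UL << index);
}

int main(void)
{
        int a = channel_request();
        int b = channel_request();

        printf("got channels %d and %d\n", a, b);
        channel_free(a);
        printf("after freeing %d, the next request gives %d\n", a, channel_request());
        return 0;
}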
H A D | syncpt.h | 46 struct host1x *host; member in struct:host1x_syncpt 55 int host1x_syncpt_init(struct host1x *host); 58 void host1x_syncpt_deinit(struct host1x *host); 61 int host1x_syncpt_nb_pts(struct host1x *host); 64 int host1x_syncpt_nb_bases(struct host1x *host); 67 int host1x_syncpt_nb_mlocks(struct host1x *host); 110 void host1x_syncpt_save(struct host1x *host); 113 void host1x_syncpt_restore(struct host1x *host); 124 return sp->id < host1x_syncpt_nb_pts(sp->host); host1x_syncpt_is_valid()
|
H A D | channel.h | 41 int host1x_channel_list_init(struct host1x *host); 43 #define host1x_for_each_channel(host, channel) \ 44 list_for_each_entry(channel, &host->chlist.list, list)
|
H A D | intr.h | 78 int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh, 87 void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref); 90 int host1x_intr_init(struct host1x *host, unsigned int irq_sync); 93 void host1x_intr_deinit(struct host1x *host); 96 void host1x_intr_start(struct host1x *host); 99 void host1x_intr_stop(struct host1x *host);
|
/linux-4.1.27/drivers/memstick/core/ |
H A D | memstick.c | 184 struct memstick_host *host = container_of(dev, struct memstick_host, memstick_free() local 186 kfree(host); memstick_free() 207 * memstick_detect_change - schedule media detection on memstick host 208 * @host - host to use 210 void memstick_detect_change(struct memstick_host *host) memstick_detect_change() argument 212 queue_work(workqueue, &host->media_checker); memstick_detect_change() 217 * memstick_next_req - called by host driver to obtain next request to process 218 * @host - host to use 224 * of 0 means that new request was assigned to the host. 226 int memstick_next_req(struct memstick_host *host, struct memstick_request **mrq) memstick_next_req() argument 230 if ((*mrq) && (*mrq)->error && host->retries) { memstick_next_req() 232 host->retries--; memstick_next_req() 236 if (host->card && host->card->next_request) memstick_next_req() 237 rc = host->card->next_request(host->card, mrq); memstick_next_req() 240 host->retries = cmd_retries > 1 ? cmd_retries - 1 : 1; memstick_next_req() 249 * memstick_new_req - notify the host that some requests are pending 250 * @host - host to use 252 void memstick_new_req(struct memstick_host *host) memstick_new_req() argument 254 if (host->card) { memstick_new_req() 255 host->retries = cmd_retries; memstick_new_req() 256 reinit_completion(&host->card->mrq_complete); memstick_new_req() 257 host->request(host); memstick_new_req() 374 memstick_new_req(card->host); memstick_set_rw_addr() 381 static struct memstick_dev *memstick_alloc_card(struct memstick_host *host) memstick_alloc_card() argument 385 struct memstick_dev *old_card = host->card; memstick_alloc_card() 389 card->host = host; memstick_alloc_card() 390 dev_set_name(&card->dev, "%s", dev_name(&host->dev)); memstick_alloc_card() 391 card->dev.parent = &host->dev; memstick_alloc_card() 403 host->card = card; memstick_alloc_card() 408 memstick_new_req(host); memstick_alloc_card() 414 host->card = old_card; memstick_alloc_card() 417 host->card = old_card; memstick_alloc_card() 422 static int memstick_power_on(struct memstick_host *host) memstick_power_on() argument 424 int rc = host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON); memstick_power_on() 427 rc = host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL); memstick_power_on() 434 struct memstick_host *host = container_of(work, struct memstick_host, memstick_check() local 438 dev_dbg(&host->dev, "memstick_check started\n"); memstick_check() 439 mutex_lock(&host->lock); memstick_check() 440 if (!host->card) { memstick_check() 441 if (memstick_power_on(host)) memstick_check() 443 } else if (host->card->stop) memstick_check() 444 host->card->stop(host->card); memstick_check() 446 card = memstick_alloc_card(host); memstick_check() 449 if (host->card) { memstick_check() 450 device_unregister(&host->card->dev); memstick_check() 451 host->card = NULL; memstick_check() 454 dev_dbg(&host->dev, "new card %02x, %02x, %02x\n", memstick_check() 456 if (host->card) { memstick_check() 457 if (memstick_set_rw_addr(host->card) memstick_check() 458 || !memstick_dev_match(host->card, &card->id) memstick_check() 459 || !(host->card->check(host->card))) { memstick_check() 460 device_unregister(&host->card->dev); memstick_check() 461 host->card = NULL; memstick_check() 462 } else if (host->card->start) memstick_check() 463 host->card->start(host->card); memstick_check() 466 if (!host->card) { memstick_check() 467 host->card = card; memstick_check() 470 kfree(host->card); memstick_check() 471 host->card = NULL; memstick_check() 
478 if (!host->card) memstick_check() 479 host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); memstick_check() 481 mutex_unlock(&host->lock); memstick_check() 482 dev_dbg(&host->dev, "memstick_check finished\n"); memstick_check() 488 * @dev: parent device of the host 493 struct memstick_host *host; memstick_alloc_host() local 495 host = kzalloc(sizeof(struct memstick_host) + extra, GFP_KERNEL); memstick_alloc_host() 496 if (host) { memstick_alloc_host() 497 mutex_init(&host->lock); memstick_alloc_host() 498 INIT_WORK(&host->media_checker, memstick_check); memstick_alloc_host() 499 host->dev.class = &memstick_host_class; memstick_alloc_host() 500 host->dev.parent = dev; memstick_alloc_host() 501 device_initialize(&host->dev); memstick_alloc_host() 503 return host; memstick_alloc_host() 508 * memstick_add_host - start request processing on memstick host 509 * @host - host to use 511 int memstick_add_host(struct memstick_host *host) memstick_add_host() argument 518 rc = idr_alloc(&memstick_host_idr, host, 0, 0, GFP_NOWAIT); memstick_add_host() 520 host->id = rc; memstick_add_host() 527 dev_set_name(&host->dev, "memstick%u", host->id); memstick_add_host() 529 rc = device_add(&host->dev); memstick_add_host() 532 idr_remove(&memstick_host_idr, host->id); memstick_add_host() 537 host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); memstick_add_host() 538 memstick_detect_change(host); memstick_add_host() 544 * memstick_remove_host - stop request processing on memstick host 545 * @host - host to use 547 void memstick_remove_host(struct memstick_host *host) memstick_remove_host() argument 550 mutex_lock(&host->lock); memstick_remove_host() 551 if (host->card) memstick_remove_host() 552 device_unregister(&host->card->dev); memstick_remove_host() 553 host->card = NULL; memstick_remove_host() 554 host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); memstick_remove_host() 555 mutex_unlock(&host->lock); memstick_remove_host() 558 idr_remove(&memstick_host_idr, host->id); memstick_remove_host() 560 device_del(&host->dev); memstick_remove_host() 565 * memstick_free_host - free memstick host 566 * @host - host to use 568 void memstick_free_host(struct memstick_host *host) memstick_free_host() argument 570 mutex_destroy(&host->lock); memstick_free_host() 571 put_device(&host->dev); memstick_free_host() 576 * memstick_suspend_host - notify bus driver of host suspension 577 * @host - host to use 579 void memstick_suspend_host(struct memstick_host *host) memstick_suspend_host() argument 581 mutex_lock(&host->lock); memstick_suspend_host() 582 host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); memstick_suspend_host() 583 mutex_unlock(&host->lock); memstick_suspend_host() 588 * memstick_resume_host - notify bus driver of host resumption 589 * @host - host to use 591 void memstick_resume_host(struct memstick_host *host) memstick_resume_host() argument 595 mutex_lock(&host->lock); memstick_resume_host() 596 if (host->card) memstick_resume_host() 597 rc = memstick_power_on(host); memstick_resume_host() 598 mutex_unlock(&host->lock); memstick_resume_host() 601 memstick_detect_change(host); memstick_resume_host()
|
/linux-4.1.27/arch/powerpc/include/asm/ |
H A D | dcr.h | 41 #define DCR_MAP_OK(host) dcr_map_ok_generic(host) 43 #define dcr_unmap(host, dcr_c) dcr_unmap_generic(host, dcr_c) 44 #define dcr_read(host, dcr_n) dcr_read_generic(host, dcr_n) 45 #define dcr_write(host, dcr_n, value) dcr_write_generic(host, dcr_n, value) 51 #define DCR_MAP_OK(host) dcr_map_ok_native(host) 53 #define dcr_unmap(host, dcr_c) dcr_unmap_native(host, dcr_c) 54 #define dcr_read(host, dcr_n) dcr_read_native(host, dcr_n) 55 #define dcr_write(host, dcr_n, value) dcr_write_native(host, dcr_n, value) 58 #define DCR_MAP_OK(host) dcr_map_ok_mmio(host) 60 #define dcr_unmap(host, dcr_c) dcr_unmap_mmio(host, dcr_c) 61 #define dcr_read(host, dcr_n) dcr_read_mmio(host, dcr_n) 62 #define dcr_write(host, dcr_n, value) dcr_write_mmio(host, dcr_n, value)
|
H A D | dcr-mmio.h | 32 static inline bool dcr_map_ok_mmio(dcr_host_mmio_t host) dcr_map_ok_mmio() argument 34 return host.token != NULL; dcr_map_ok_mmio() 40 extern void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c); 42 static inline u32 dcr_read_mmio(dcr_host_mmio_t host, unsigned int dcr_n) dcr_read_mmio() argument 44 return in_be32(host.token + ((host.base + dcr_n) * host.stride)); dcr_read_mmio() 47 static inline void dcr_write_mmio(dcr_host_mmio_t host, dcr_write_mmio() argument 51 out_be32(host.token + ((host.base + dcr_n) * host.stride), value); dcr_write_mmio()
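In dcr_read_mmio()/dcr_write_mmio() above, the register address is computed as token + (base + dcr_n) * stride: the mapped window starts at token, base is the first DCR number it covers, and consecutive DCR numbers sit stride apart. The standalone sketch below only illustrates that indexing; it uses an ordinary uint32_t array instead of the __iomem accessors, so the stride here counts array slots rather than the byte-based pointer arithmetic of the kernel code.

#include <stdint.h>
#include <stdio.h>

struct dcr_window {
	uint32_t *token;	/* start of the mapped window */
	unsigned int base;	/* first DCR number covered by the window */
	unsigned int stride;	/* distance, in slots, between consecutive DCRs */
};

static uint32_t dcr_read_sketch(struct dcr_window w, unsigned int dcr_n)
{
	return w.token[(w.base + dcr_n) * w.stride];
}

static void dcr_write_sketch(struct dcr_window w, unsigned int dcr_n, uint32_t v)
{
	w.token[(w.base + dcr_n) * w.stride] = v;
}

int main(void)
{
	uint32_t regs[64] = { 0 };
	struct dcr_window w = { .token = regs, .base = 0, .stride = 1 };

	dcr_write_sketch(w, 3, 0xdeadbeef);
	printf("dcr 3 = 0x%x\n", (unsigned)dcr_read_sketch(w, 3));
	return 0;
}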
|
H A D | dcr-generic.h | 32 } host; member in struct:__anon2255 35 extern bool dcr_map_ok_generic(dcr_host_t host); 39 extern void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c); 41 extern u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n); 43 extern void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value);
|
/linux-4.1.27/drivers/block/ |
H A D | mg_disk.c | 149 static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes) mg_end_request() argument 151 if (__blk_end_request(host->req, err, nr_bytes)) mg_end_request() 154 host->req = NULL; mg_end_request() 158 static bool mg_end_request_cur(struct mg_host *host, int err) mg_end_request_cur() argument 160 return mg_end_request(host, err, blk_rq_cur_bytes(host->req)); mg_end_request_cur() 164 struct mg_host *host) mg_dump_status() 168 if (host->req) mg_dump_status() 169 name = host->req->rq_disk->disk_name; mg_dump_status() 188 host->error = 0; mg_dump_status() 190 host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR); mg_dump_status() 192 host->error & 0xff); mg_dump_status() 193 if (host->error & ATA_BBK) mg_dump_status() 195 if (host->error & ATA_UNC) mg_dump_status() 197 if (host->error & ATA_IDNF) mg_dump_status() 199 if (host->error & ATA_ABORTED) mg_dump_status() 201 if (host->error & ATA_AMNF) mg_dump_status() 204 if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) { mg_dump_status() 205 if (host->req) mg_dump_status() 207 (unsigned int)blk_rq_pos(host->req)); mg_dump_status() 213 static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec) mg_wait() argument 217 struct mg_drv_data *prv_data = host->dev->platform_data; mg_wait() 219 host->error = MG_ERR_NONE; mg_wait() 228 status = inb((unsigned long)host->dev_base + MG_REG_STATUS); mg_wait() 229 status = inb((unsigned long)host->dev_base + MG_REG_STATUS); mg_wait() 232 status = inb((unsigned long)host->dev_base + MG_REG_STATUS); mg_wait() 242 mg_dump_status("mg_wait", status, host); mg_wait() 255 mg_dump_status("not ready", status, host); mg_wait() 259 status = inb((unsigned long)host->dev_base + MG_REG_STATUS); mg_wait() 263 host->error = MG_ERR_TIMEOUT; mg_wait() 265 return host->error; mg_wait() 282 static void mg_unexpected_intr(struct mg_host *host) mg_unexpected_intr() argument 284 u32 status = inb((unsigned long)host->dev_base + MG_REG_STATUS); mg_unexpected_intr() 286 mg_dump_status("mg_unexpected_intr", status, host); mg_unexpected_intr() 291 struct mg_host *host = dev_id; mg_irq() local 292 void (*handler)(struct mg_host *) = host->mg_do_intr; mg_irq() 294 spin_lock(&host->lock); mg_irq() 296 host->mg_do_intr = NULL; mg_irq() 297 del_timer(&host->timer); mg_irq() 300 handler(host); mg_irq() 302 spin_unlock(&host->lock); mg_irq() 343 static int mg_get_disk_id(struct mg_host *host) mg_get_disk_id() argument 347 const u16 *id = host->id; mg_get_disk_id() 348 struct mg_drv_data *prv_data = host->dev->platform_data; mg_get_disk_id() 354 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); mg_get_disk_id() 356 outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND); mg_get_disk_id() 357 err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ); mg_get_disk_id() 362 host->id[i] = le16_to_cpu(inw((unsigned long)host->dev_base + mg_get_disk_id() 365 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); mg_get_disk_id() 366 err = mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD); mg_get_disk_id() 373 host->n_sectors = ata_id_u32(id, ATA_ID_LBA_CAPACITY); mg_get_disk_id() 374 host->cyls = id[ATA_ID_CYLS]; mg_get_disk_id() 375 host->heads = id[ATA_ID_HEADS]; mg_get_disk_id() 376 host->sectors = id[ATA_ID_SECTORS]; mg_get_disk_id() 378 if (MG_RES_SEC && host->heads && host->sectors) { mg_get_disk_id() 380 host->cyls = (host->n_sectors - MG_RES_SEC) / mg_get_disk_id() 381 host->heads / host->sectors; mg_get_disk_id() 382 host->nres_sectors = 
host->n_sectors - host->cyls * mg_get_disk_id() 383 host->heads * host->sectors; mg_get_disk_id() 384 host->n_sectors -= host->nres_sectors; mg_get_disk_id() 394 host->n_sectors, host->nres_sectors); mg_get_disk_id() 397 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); mg_get_disk_id() 403 static int mg_disk_init(struct mg_host *host) mg_disk_init() argument 405 struct mg_drv_data *prv_data = host->dev->platform_data; mg_disk_init() 410 gpio_set_value(host->rst, 0); mg_disk_init() 411 err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY); mg_disk_init() 416 gpio_set_value(host->rst, 1); mg_disk_init() 417 err = mg_wait(host, MG_STAT_READY, MG_TMAX_HDRST_TO_RDY); mg_disk_init() 423 (unsigned long)host->dev_base + MG_REG_DRV_CTRL); mg_disk_init() 424 err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY); mg_disk_init() 430 (unsigned long)host->dev_base + MG_REG_DRV_CTRL); mg_disk_init() 431 err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY); mg_disk_init() 435 init_status = inb((unsigned long)host->dev_base + MG_REG_STATUS) & 0xf; mg_disk_init() 443 static void mg_bad_rw_intr(struct mg_host *host) mg_bad_rw_intr() argument 445 if (host->req) mg_bad_rw_intr() 446 if (++host->req->errors >= MG_MAX_ERRORS || mg_bad_rw_intr() 447 host->error == MG_ERR_TIMEOUT) mg_bad_rw_intr() 448 mg_end_request_cur(host, -EIO); mg_bad_rw_intr() 451 static unsigned int mg_out(struct mg_host *host, mg_out() argument 457 struct mg_drv_data *prv_data = host->dev->platform_data; mg_out() 459 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) mg_out() 460 return host->error; mg_out() 463 host->mg_do_intr = intr_addr; mg_out() 464 mod_timer(&host->timer, jiffies + 3 * HZ); mg_out() 468 outb((u8)sect_cnt, (unsigned long)host->dev_base + MG_REG_SECT_CNT); mg_out() 469 outb((u8)sect_num, (unsigned long)host->dev_base + MG_REG_SECT_NUM); mg_out() 470 outb((u8)(sect_num >> 8), (unsigned long)host->dev_base + mg_out() 472 outb((u8)(sect_num >> 16), (unsigned long)host->dev_base + mg_out() 475 (unsigned long)host->dev_base + MG_REG_DRV_HEAD); mg_out() 476 outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND); mg_out() 480 static void mg_read_one(struct mg_host *host, struct request *req) mg_read_one() argument 486 *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET + mg_read_one() 492 struct mg_host *host = req->rq_disk->private_data; mg_read() local 494 if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req), mg_read() 496 mg_bad_rw_intr(host); mg_read() 502 if (mg_wait(host, ATA_DRQ, mg_read() 504 mg_bad_rw_intr(host); mg_read() 508 mg_read_one(host, req); mg_read() 510 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + mg_read() 512 } while (mg_end_request(host, 0, MG_SECTOR_SIZE)); mg_read() 515 static void mg_write_one(struct mg_host *host, struct request *req) mg_write_one() argument 521 outw(*buff++, (unsigned long)host->dev_base + MG_BUFF_OFFSET + mg_write_one() 527 struct mg_host *host = req->rq_disk->private_data; mg_write() local 530 if (mg_out(host, blk_rq_pos(req), rem, mg_write() 532 mg_bad_rw_intr(host); mg_write() 539 if (mg_wait(host, ATA_DRQ, mg_write() 541 mg_bad_rw_intr(host); mg_write() 546 mg_write_one(host, req); mg_write() 548 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + mg_write() 552 if (rem > 1 && mg_wait(host, ATA_DRQ, mg_write() 554 mg_bad_rw_intr(host); mg_write() 556 } else if (mg_wait(host, MG_STAT_READY, mg_write() 558 mg_bad_rw_intr(host); mg_write() 561 } while (mg_end_request(host, 0, MG_SECTOR_SIZE)); mg_write() 564 static void mg_read_intr(struct mg_host 
*host) mg_read_intr() argument 566 struct request *req = host->req; mg_read_intr() 571 i = inb((unsigned long)host->dev_base + MG_REG_STATUS); mg_read_intr() 579 mg_dump_status("mg_read_intr", i, host); mg_read_intr() 580 mg_bad_rw_intr(host); mg_read_intr() 581 mg_request(host->breq); mg_read_intr() 585 mg_read_one(host, req); mg_read_intr() 591 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); mg_read_intr() 593 if (mg_end_request(host, 0, MG_SECTOR_SIZE)) { mg_read_intr() 595 host->mg_do_intr = mg_read_intr; mg_read_intr() 596 mod_timer(&host->timer, jiffies + 3 * HZ); mg_read_intr() 598 mg_request(host->breq); mg_read_intr() 601 static void mg_write_intr(struct mg_host *host) mg_write_intr() argument 603 struct request *req = host->req; mg_write_intr() 609 i = inb((unsigned long)host->dev_base + MG_REG_STATUS); mg_write_intr() 617 mg_dump_status("mg_write_intr", i, host); mg_write_intr() 618 mg_bad_rw_intr(host); mg_write_intr() 619 mg_request(host->breq); mg_write_intr() 623 if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) { mg_write_intr() 625 mg_write_one(host, req); mg_write_intr() 628 host->mg_do_intr = mg_write_intr; mg_write_intr() 629 mod_timer(&host->timer, jiffies + 3 * HZ); mg_write_intr() 633 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); mg_write_intr() 636 mg_request(host->breq); mg_write_intr() 641 struct mg_host *host = (struct mg_host *)data; mg_times_out() local 644 spin_lock_irq(&host->lock); mg_times_out() 646 if (!host->req) mg_times_out() 649 host->mg_do_intr = NULL; mg_times_out() 651 name = host->req->rq_disk->disk_name; mg_times_out() 654 host->error = MG_ERR_TIMEOUT; mg_times_out() 655 mg_bad_rw_intr(host); mg_times_out() 658 mg_request(host->breq); mg_times_out() 659 spin_unlock_irq(&host->lock); mg_times_out() 664 struct mg_host *host = q->queuedata; mg_request_poll() local 667 if (!host->req) { mg_request_poll() 668 host->req = blk_fetch_request(q); mg_request_poll() 669 if (!host->req) mg_request_poll() 673 if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) { mg_request_poll() 674 mg_end_request_cur(host, -EIO); mg_request_poll() 678 if (rq_data_dir(host->req) == READ) mg_request_poll() 679 mg_read(host->req); mg_request_poll() 681 mg_write(host->req); mg_request_poll() 686 struct mg_host *host, mg_issue_req() 692 if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr) mg_issue_req() 694 mg_bad_rw_intr(host); mg_issue_req() 695 return host->error; mg_issue_req() 700 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); mg_issue_req() 701 if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr) mg_issue_req() 703 mg_bad_rw_intr(host); mg_issue_req() 704 return host->error; mg_issue_req() 706 del_timer(&host->timer); mg_issue_req() 707 mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ); mg_issue_req() 708 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); mg_issue_req() 709 if (host->error) { mg_issue_req() 710 mg_bad_rw_intr(host); mg_issue_req() 711 return host->error; mg_issue_req() 713 mg_write_one(host, req); mg_issue_req() 714 mod_timer(&host->timer, jiffies + 3 * HZ); mg_issue_req() 715 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + mg_issue_req() 725 struct mg_host *host = q->queuedata; mg_request() local 730 if (!host->req) { mg_request() 731 host->req = blk_fetch_request(q); mg_request() 732 if (!host->req) mg_request() 735 req = host->req; mg_request() 738 if (host->mg_do_intr) mg_request() 741 del_timer(&host->timer); mg_request() 755 mg_end_request_cur(host, -EIO); 
mg_request() 760 mg_end_request_cur(host, -EIO); mg_request() 764 if (!mg_issue_req(req, host, sect_num, sect_cnt)) mg_request() 771 struct mg_host *host = bdev->bd_disk->private_data; mg_getgeo() local 773 geo->cylinders = (unsigned short)host->cyls; mg_getgeo() 774 geo->heads = (unsigned char)host->heads; mg_getgeo() 775 geo->sectors = (unsigned char)host->sectors; mg_getgeo() 787 struct mg_host *host = prv_data->host; mg_suspend() local 789 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) mg_suspend() 793 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); mg_suspend() 795 outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND); mg_suspend() 799 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) { mg_suspend() 801 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); mg_suspend() 811 struct mg_host *host = prv_data->host; mg_resume() local 813 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) mg_resume() 816 outb(MG_CMD_WAKEUP, (unsigned long)host->dev_base + MG_REG_COMMAND); mg_resume() 820 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) mg_resume() 824 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); mg_resume() 834 struct mg_host *host; mg_probe() local 847 host = kzalloc(sizeof(struct mg_host), GFP_KERNEL); mg_probe() 848 if (!host) { mg_probe() 854 host->major = MG_DISK_MAJ; mg_probe() 857 prv_data->host = host; mg_probe() 858 host->dev = &plat_dev->dev; mg_probe() 868 host->dev_base = ioremap(rsc->start, resource_size(rsc)); mg_probe() 869 if (!host->dev_base) { mg_probe() 875 MG_DBG("dev_base = 0x%x\n", (u32)host->dev_base); mg_probe() 886 host->rst = rsc->start; mg_probe() 889 err = gpio_request(host->rst, MG_RST_PIN); mg_probe() 892 gpio_direction_output(host->rst, 1); mg_probe() 909 host->rstout = rsc->start; mg_probe() 910 err = gpio_request(host->rstout, MG_RSTOUT_PIN); mg_probe() 913 gpio_direction_input(host->rstout); mg_probe() 919 err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT); mg_probe() 922 err = mg_disk_init(host); mg_probe() 933 host->irq = platform_get_irq(plat_dev, 0); mg_probe() 934 if (host->irq == -ENXIO) { mg_probe() 935 err = host->irq; mg_probe() 938 err = request_irq(host->irq, mg_irq, mg_probe() 940 MG_DEV_NAME, host); mg_probe() 950 err = mg_get_disk_id(host); mg_probe() 958 err = register_blkdev(host->major, MG_DISK_NAME); mg_probe() 964 if (!host->major) mg_probe() 965 host->major = err; mg_probe() 967 spin_lock_init(&host->lock); mg_probe() 970 host->breq = blk_init_queue(mg_request_poll, &host->lock); mg_probe() 972 host->breq = blk_init_queue(mg_request, &host->lock); mg_probe() 974 if (!host->breq) { mg_probe() 980 host->breq->queuedata = host; mg_probe() 983 err = elevator_change(host->breq, "noop"); mg_probe() 989 blk_queue_max_hw_sectors(host->breq, MG_MAX_SECTS); mg_probe() 990 blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE); mg_probe() 992 init_timer(&host->timer); mg_probe() 993 host->timer.function = mg_times_out; mg_probe() 994 host->timer.data = (unsigned long)host; mg_probe() 996 host->gd = alloc_disk(MG_DISK_MAX_PART); mg_probe() 997 if (!host->gd) { mg_probe() 1003 host->gd->major = host->major; mg_probe() 1004 host->gd->first_minor = 0; mg_probe() 1005 host->gd->fops = &mg_disk_ops; mg_probe() 1006 host->gd->queue = host->breq; mg_probe() 1007 host->gd->private_data = host; mg_probe() 1008 sprintf(host->gd->disk_name, MG_DISK_NAME"a"); mg_probe() 1010 set_capacity(host->gd, host->n_sectors); mg_probe() 1012 add_disk(host->gd); mg_probe() 1017 
del_timer_sync(&host->timer); mg_probe() 1019 blk_cleanup_queue(host->breq); mg_probe() 1024 free_irq(host->irq, host); mg_probe() 1026 gpio_free(host->rstout); mg_probe() 1028 gpio_free(host->rst); mg_probe() 1030 iounmap(host->dev_base); mg_probe() 1032 kfree(host); mg_probe() 1040 struct mg_host *host = prv_data->host; mg_remove() local 1044 del_timer_sync(&host->timer); mg_remove() 1047 if (host->gd) { mg_remove() 1048 del_gendisk(host->gd); mg_remove() 1049 put_disk(host->gd); mg_remove() 1052 if (host->breq) mg_remove() 1053 blk_cleanup_queue(host->breq); mg_remove() 1056 unregister_blkdev(host->major, MG_DISK_NAME); mg_remove() 1060 free_irq(host->irq, host); mg_remove() 1064 gpio_free(host->rstout); mg_remove() 1067 if (host->rst) mg_remove() 1068 gpio_free(host->rst); mg_remove() 1071 if (host->dev_base) mg_remove() 1072 iounmap(host->dev_base); mg_remove() 1075 kfree(host); mg_remove() 163 mg_dump_status(const char *msg, unsigned int stat, struct mg_host *host) mg_dump_status() argument 685 mg_issue_req(struct request *req, struct mg_host *host, unsigned int sect_num, unsigned int sect_cnt) mg_issue_req() argument
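mg_wait() above is a poll-until-ready loop: keep reading MG_REG_STATUS, return as soon as the expected bits appear, and give up with MG_ERR_TIMEOUT once the deadline passes. The standalone sketch below condenses that control flow; read_status() is a fake register that clears its busy bit after a few reads, standing in for the inb() calls.

#include <stdio.h>
#include <time.h>

#define ERR_NONE	0
#define ERR_TIMEOUT	1

/* Fake status register: busy (0x80) for the first few reads, then ready (0x40). */
static unsigned int read_status(void)
{
	static int calls;
	return (++calls < 5) ? 0x80 : 0x40;
}

/* Poll until (status & expect) is non-zero or roughly msec milliseconds pass. */
static int wait_for(unsigned int expect, long msec)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (read_status() & expect)
			return ERR_NONE;

		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000L +
		    (now.tv_nsec - start.tv_nsec) / 1000000L >= msec)
			return ERR_TIMEOUT;
	}
}

int main(void)
{
	printf("wait_for(ready): %s\n",
	       wait_for(0x40, 100) == ERR_NONE ? "ok" : "timeout");
	return 0;
}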
|
H A D | sx8.c | 114 CARM_MAX_REQ = 64, /* max command msgs per host */ 117 /* S/G limits, host-wide and per-request */ 119 CARM_MAX_HOST_SG = 600, /* max s/g entries per host */ 126 CARM_HMUC = 0x18, /* host message unit control */ 246 struct carm_host *host; member in struct:carm_port 467 static inline void *carm_ref_msg(struct carm_host *host, carm_ref_msg() argument 470 return host->msg_base + (msg_idx * CARM_MSG_SIZE); carm_ref_msg() 473 static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host, carm_ref_msg_dma() argument 476 return host->msg_dma + (msg_idx * CARM_MSG_SIZE); carm_ref_msg_dma() 479 static int carm_send_msg(struct carm_host *host, carm_send_msg() argument 482 void __iomem *mmio = host->mmio; carm_send_msg() 483 u32 msg = (u32) carm_ref_msg_dma(host, crq->tag); carm_send_msg() 498 DPRINTK("host msg queue full\n"); carm_send_msg() 508 static struct carm_request *carm_get_request(struct carm_host *host) carm_get_request() argument 513 if (host->hw_sg_used >= (CARM_MAX_HOST_SG - CARM_MAX_REQ_SG)) carm_get_request() 517 if ((host->msg_alloc & (1ULL << i)) == 0) { carm_get_request() 518 struct carm_request *crq = &host->req[i]; carm_get_request() 522 host->msg_alloc |= (1ULL << i); carm_get_request() 523 host->n_msgs++; carm_get_request() 525 assert(host->n_msgs <= CARM_MAX_REQ); carm_get_request() 534 static int carm_put_request(struct carm_host *host, struct carm_request *crq) carm_put_request() argument 538 if (unlikely((host->msg_alloc & (1ULL << crq->tag)) == 0)) carm_put_request() 541 assert(host->hw_sg_used >= crq->n_elem); carm_put_request() 543 host->msg_alloc &= ~(1ULL << crq->tag); carm_put_request() 544 host->hw_sg_used -= crq->n_elem; carm_put_request() 545 host->n_msgs--; carm_put_request() 550 static struct carm_request *carm_get_special(struct carm_host *host) carm_get_special() argument 558 spin_lock_irqsave(&host->lock, flags); carm_get_special() 559 crq = carm_get_request(host); carm_get_special() 560 spin_unlock_irqrestore(&host->lock, flags); carm_get_special() 570 rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL); carm_get_special() 572 spin_lock_irqsave(&host->lock, flags); carm_get_special() 573 carm_put_request(host, crq); carm_get_special() 574 spin_unlock_irqrestore(&host->lock, flags); carm_get_special() 582 static int carm_array_info (struct carm_host *host, unsigned int array_idx) carm_array_info() argument 591 crq = carm_get_special(host); carm_array_info() 599 ioc = carm_ref_msg(host, idx); carm_array_info() 600 msg_dma = carm_ref_msg_dma(host, idx); carm_array_info() 617 spin_lock_irq(&host->lock); carm_array_info() 618 assert(host->state == HST_DEV_SCAN_START || carm_array_info() 619 host->state == HST_DEV_SCAN); carm_array_info() 620 spin_unlock_irq(&host->lock); carm_array_info() 625 blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); carm_array_info() 630 spin_lock_irq(&host->lock); carm_array_info() 631 host->state = HST_ERROR; carm_array_info() 632 spin_unlock_irq(&host->lock); carm_array_info() 638 static int carm_send_special (struct carm_host *host, carm_sspc_t func) carm_send_special() argument 646 crq = carm_get_special(host); carm_send_special() 652 mem = carm_ref_msg(host, idx); carm_send_special() 654 msg_size = func(host, idx, mem); carm_send_special() 666 blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); carm_send_special() 671 static unsigned int carm_fill_sync_time(struct carm_host *host, carm_fill_sync_time() argument 688 static unsigned int carm_fill_alloc_buf(struct carm_host 
*host, carm_fill_alloc_buf() argument 699 ab->addr = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1)); carm_fill_alloc_buf() 701 ab->evt_pool = cpu_to_le32(host->shm_dma + (16 * 1024)); carm_fill_alloc_buf() 703 ab->rbuf_pool = cpu_to_le32(host->shm_dma); carm_fill_alloc_buf() 705 ab->msg_pool = cpu_to_le32(host->shm_dma + RBUF_LEN); carm_fill_alloc_buf() 707 ab->sg[0].start = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1)); carm_fill_alloc_buf() 713 static unsigned int carm_fill_scan_channels(struct carm_host *host, carm_fill_scan_channels() argument 717 u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + carm_fill_scan_channels() 733 static unsigned int carm_fill_get_fw_ver(struct carm_host *host, carm_fill_get_fw_ver() argument 737 u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + sizeof(*ioc)); carm_fill_get_fw_ver() 749 static inline void carm_end_request_queued(struct carm_host *host, carm_end_request_queued() argument 758 rc = carm_put_request(host, crq); carm_end_request_queued() 762 static inline void carm_push_q (struct carm_host *host, struct request_queue *q) carm_push_q() argument 764 unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q; carm_push_q() 769 host->wait_q[idx] = q; carm_push_q() 770 host->wait_q_prod++; carm_push_q() 771 BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */ carm_push_q() 774 static inline struct request_queue *carm_pop_q(struct carm_host *host) carm_pop_q() argument 778 if (host->wait_q_prod == host->wait_q_cons) carm_pop_q() 781 idx = host->wait_q_cons % CARM_MAX_WAIT_Q; carm_pop_q() 782 host->wait_q_cons++; carm_pop_q() 784 return host->wait_q[idx]; carm_pop_q() 787 static inline void carm_round_robin(struct carm_host *host) carm_round_robin() argument 789 struct request_queue *q = carm_pop_q(host); carm_round_robin() 796 static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq, carm_end_rq() argument 799 carm_end_request_queued(host, crq, error); carm_end_rq() 801 carm_round_robin(host); carm_end_rq() 802 else if ((host->n_msgs <= CARM_MSG_LOW_WATER) && carm_end_rq() 803 (host->hw_sg_used <= CARM_SG_LOW_WATER)) { carm_end_rq() 804 carm_round_robin(host); carm_end_rq() 810 struct carm_host *host = q->queuedata; carm_oob_rq_fn() local 828 rc = carm_send_msg(host, crq); carm_oob_rq_fn() 831 carm_push_q(host, q); carm_oob_rq_fn() 840 struct carm_host *host = port->host; carm_rq_fn() local 855 crq = carm_get_request(host); carm_rq_fn() 857 carm_push_q(host, q); carm_rq_fn() 875 carm_end_rq(host, crq, -EIO); carm_rq_fn() 880 n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir); carm_rq_fn() 882 carm_end_rq(host, crq, -EIO); carm_rq_fn() 887 host->hw_sg_used += n_elem; carm_rq_fn() 894 msg = (struct carm_msg_rw *) carm_ref_msg(host, crq->tag); carm_rq_fn() 930 rc = carm_send_msg(host, crq); carm_rq_fn() 932 carm_put_request(host, crq); carm_rq_fn() 934 carm_push_q(host, q); carm_rq_fn() 941 static void carm_handle_array_info(struct carm_host *host, carm_handle_array_info() argument 954 carm_end_rq(host, crq, error); carm_handle_array_info() 961 cur_port = host->cur_scan_dev; carm_handle_array_info() 970 port = &host->port[cur_port]; carm_handle_array_info() 980 host->dev_active |= (1 << cur_port); carm_handle_array_info() 991 pci_name(host->pdev), port->port_no, carm_handle_array_info() 994 pci_name(host->pdev), port->port_no, port->name); carm_handle_array_info() 997 assert(host->state == HST_DEV_SCAN); carm_handle_array_info() 998 schedule_work(&host->fsm_task); carm_handle_array_info() 1001 static void 
carm_handle_scan_chan(struct carm_host *host, carm_handle_scan_chan() argument 1011 carm_end_rq(host, crq, error); carm_handle_scan_chan() 1021 host->dev_present |= (1 << i); carm_handle_scan_chan() 1026 pci_name(host->pdev), dev_count); carm_handle_scan_chan() 1029 assert(host->state == HST_PORT_SCAN); carm_handle_scan_chan() 1030 host->state = new_state; carm_handle_scan_chan() 1031 schedule_work(&host->fsm_task); carm_handle_scan_chan() 1034 static void carm_handle_generic(struct carm_host *host, carm_handle_generic() argument 1040 carm_end_rq(host, crq, error); carm_handle_generic() 1042 assert(host->state == cur_state); carm_handle_generic() 1044 host->state = HST_ERROR; carm_handle_generic() 1046 host->state = next_state; carm_handle_generic() 1047 schedule_work(&host->fsm_task); carm_handle_generic() 1050 static inline void carm_handle_rw(struct carm_host *host, carm_handle_rw() argument 1062 pci_unmap_sg(host->pdev, &crq->sg[0], crq->n_elem, pci_dir); carm_handle_rw() 1064 carm_end_rq(host, crq, error); carm_handle_rw() 1067 static inline void carm_handle_resp(struct carm_host *host, carm_handle_resp() argument 1080 pci_name(host->pdev), handle); carm_handle_resp() 1087 crq = &host->req[msg_idx]; carm_handle_resp() 1092 carm_handle_rw(host, crq, error); carm_handle_resp() 1096 mem = carm_ref_msg(host, msg_idx); carm_handle_resp() 1102 carm_handle_scan_chan(host, crq, mem, error); carm_handle_resp() 1114 carm_handle_generic(host, crq, error, carm_handle_resp() 1118 carm_handle_generic(host, crq, error, carm_handle_resp() 1125 host->fw_ver = le32_to_cpu(ver->version); carm_handle_resp() 1126 host->flags |= (ver->features & FL_FW_VER_MASK); carm_handle_resp() 1128 carm_handle_generic(host, crq, error, carm_handle_resp() 1142 carm_handle_array_info(host, crq, mem, error); carm_handle_resp() 1160 pci_name(host->pdev), crq->msg_type, crq->msg_subtype); carm_handle_resp() 1161 carm_end_rq(host, crq, -EIO); carm_handle_resp() 1164 static inline void carm_handle_responses(struct carm_host *host) carm_handle_responses() argument 1166 void __iomem *mmio = host->mmio; carm_handle_responses() 1167 struct carm_response *resp = (struct carm_response *) host->shm; carm_handle_responses() 1169 unsigned int idx = host->resp_idx % RMSG_Q_LEN; carm_handle_responses() 1183 carm_handle_resp(host, resp[idx].ret_handle, status); carm_handle_responses() 1192 pci_name(host->pdev), (int) evt_type); carm_handle_responses() 1201 host->resp_idx += work; carm_handle_responses() 1206 struct carm_host *host = __host; carm_interrupt() local 1212 if (!host) { carm_interrupt() 1213 VPRINTK("no host\n"); carm_interrupt() 1217 spin_lock_irqsave(&host->lock, flags); carm_interrupt() 1219 mmio = host->mmio; carm_interrupt() 1232 if (unlikely(host->state == HST_INVALID)) { carm_interrupt() 1239 carm_handle_responses(host); carm_interrupt() 1243 spin_unlock_irqrestore(&host->lock, flags); carm_interrupt() 1250 struct carm_host *host = carm_fsm_task() local 1258 spin_lock_irqsave(&host->lock, flags); carm_fsm_task() 1259 state = host->state; carm_fsm_task() 1260 spin_unlock_irqrestore(&host->lock, flags); carm_fsm_task() 1271 rc = carm_send_special(host, carm_fill_alloc_buf); carm_fsm_task() 1279 rc = carm_send_special(host, carm_fill_sync_time); carm_fsm_task() 1287 rc = carm_send_special(host, carm_fill_get_fw_ver); carm_fsm_task() 1295 rc = carm_send_special(host, carm_fill_scan_channels); carm_fsm_task() 1303 host->cur_scan_dev = -1; carm_fsm_task() 1310 for (i = host->cur_scan_dev + 1; i < CARM_MAX_PORTS; i++) 
carm_fsm_task() 1311 if (host->dev_present & (1 << i)) { carm_fsm_task() 1317 host->cur_scan_dev = next_dev; carm_fsm_task() 1318 rc = carm_array_info(host, next_dev); carm_fsm_task() 1332 if (host->dev_active & (1 << i)) { carm_fsm_task() 1333 struct carm_port *port = &host->port[i]; carm_fsm_task() 1342 pci_name(host->pdev), activated); carm_fsm_task() 1350 complete(&host->probe_comp); carm_fsm_task() 1365 spin_lock_irqsave(&host->lock, flags); carm_fsm_task() 1366 host->state = new_state; carm_fsm_task() 1367 spin_unlock_irqrestore(&host->lock, flags); carm_fsm_task() 1370 schedule_work(&host->fsm_task); carm_fsm_task() 1397 static void carm_init_responses(struct carm_host *host) carm_init_responses() argument 1399 void __iomem *mmio = host->mmio; carm_init_responses() 1401 struct carm_response *resp = (struct carm_response *) host->shm; carm_init_responses() 1409 static int carm_init_host(struct carm_host *host) carm_init_host() argument 1411 void __iomem *mmio = host->mmio; carm_init_host() 1460 writel(host->shm_dma & 0xffffffff, mmio + RBUF_ADDR_LO); carm_init_host() 1461 writel((host->shm_dma >> 16) >> 16, mmio + RBUF_ADDR_HI); carm_init_host() 1478 carm_init_responses(host); carm_init_host() 1481 spin_lock_irq(&host->lock); carm_init_host() 1482 assert(host->state == HST_INVALID); carm_init_host() 1483 host->state = HST_PROBE_START; carm_init_host() 1484 spin_unlock_irq(&host->lock); carm_init_host() 1485 schedule_work(&host->fsm_task); carm_init_host() 1491 static int carm_init_disks(struct carm_host *host) carm_init_disks() argument 1501 port = &host->port[i]; carm_init_disks() 1502 port->host = host; carm_init_disks() 1513 (unsigned int) (host->id * CARM_MAX_PORTS) + i); carm_init_disks() 1514 disk->major = host->major; carm_init_disks() 1519 q = blk_init_queue(carm_rq_fn, &host->lock); carm_init_disks() 1534 static void carm_free_disks(struct carm_host *host) carm_free_disks() argument 1539 struct gendisk *disk = host->port[i].disk; carm_free_disks() 1552 static int carm_init_shm(struct carm_host *host) carm_init_shm() argument 1554 host->shm = pci_alloc_consistent(host->pdev, CARM_SHM_SIZE, carm_init_shm() 1555 &host->shm_dma); carm_init_shm() 1556 if (!host->shm) carm_init_shm() 1559 host->msg_base = host->shm + RBUF_LEN; carm_init_shm() 1560 host->msg_dma = host->shm_dma + RBUF_LEN; carm_init_shm() 1562 memset(host->shm, 0xff, RBUF_LEN); carm_init_shm() 1563 memset(host->msg_base, 0, PDC_SHM_SIZE - RBUF_LEN); carm_init_shm() 1570 struct carm_host *host; carm_init_one() local 1609 host = kzalloc(sizeof(*host), GFP_KERNEL); carm_init_one() 1610 if (!host) { carm_init_one() 1617 host->pdev = pdev; carm_init_one() 1618 host->flags = pci_dac ? 
FL_DAC : 0; carm_init_one() 1619 spin_lock_init(&host->lock); carm_init_one() 1620 INIT_WORK(&host->fsm_task, carm_fsm_task); carm_init_one() 1621 init_completion(&host->probe_comp); carm_init_one() 1623 for (i = 0; i < ARRAY_SIZE(host->req); i++) carm_init_one() 1624 host->req[i].tag = i; carm_init_one() 1626 host->mmio = ioremap(pci_resource_start(pdev, 0), carm_init_one() 1628 if (!host->mmio) { carm_init_one() 1635 rc = carm_init_shm(host); carm_init_one() 1642 q = blk_init_queue(carm_oob_rq_fn, &host->lock); carm_init_one() 1649 host->oob_q = q; carm_init_one() 1650 q->queuedata = host; carm_init_one() 1656 host->major = 160; carm_init_one() 1658 host->major = 161; carm_init_one() 1660 host->flags |= FL_DYN_MAJOR; carm_init_one() 1662 host->id = carm_host_id; carm_init_one() 1663 sprintf(host->name, DRV_NAME "%d", carm_host_id); carm_init_one() 1665 rc = register_blkdev(host->major, host->name); carm_init_one() 1668 if (host->flags & FL_DYN_MAJOR) carm_init_one() 1669 host->major = rc; carm_init_one() 1671 rc = carm_init_disks(host); carm_init_one() 1677 rc = request_irq(pdev->irq, carm_interrupt, IRQF_SHARED, DRV_NAME, host); carm_init_one() 1684 rc = carm_init_host(host); carm_init_one() 1689 wait_for_completion(&host->probe_comp); carm_init_one() 1692 host->name, pci_name(pdev), (int) CARM_MAX_PORTS, carm_init_one() 1694 pdev->irq, host->major); carm_init_one() 1697 pci_set_drvdata(pdev, host); carm_init_one() 1701 free_irq(pdev->irq, host); carm_init_one() 1703 carm_free_disks(host); carm_init_one() 1704 unregister_blkdev(host->major, host->name); carm_init_one() 1706 if (host->major == 160) carm_init_one() 1708 else if (host->major == 161) carm_init_one() 1710 blk_cleanup_queue(host->oob_q); carm_init_one() 1712 pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma); carm_init_one() 1714 iounmap(host->mmio); carm_init_one() 1716 kfree(host); carm_init_one() 1726 struct carm_host *host = pci_get_drvdata(pdev); carm_remove_one() local 1728 if (!host) { carm_remove_one() 1729 printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n", carm_remove_one() 1734 free_irq(pdev->irq, host); carm_remove_one() 1735 carm_free_disks(host); carm_remove_one() 1736 unregister_blkdev(host->major, host->name); carm_remove_one() 1737 if (host->major == 160) carm_remove_one() 1739 else if (host->major == 161) carm_remove_one() 1741 blk_cleanup_queue(host->oob_q); carm_remove_one() 1742 pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma); carm_remove_one() 1743 iounmap(host->mmio); carm_remove_one() 1744 kfree(host); carm_remove_one()
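carm_get_request()/carm_put_request() above hand out message slots from a 64-bit bitmap: scan host->msg_alloc for a clear bit, set it to claim that tag, and clear it again when the request completes. Below is a standalone sketch of the same tag allocator; the 64-slot limit mirrors CARM_MAX_REQ, everything else (names, locking left out) is illustrative only.

#include <stdint.h>
#include <stdio.h>

#define MAX_TAGS 64

struct tag_pool {
	uint64_t alloc;			/* bit i set => tag i is in use */
};

/* Claim a free tag in [0, MAX_TAGS), or return -1 when all are busy. */
static int tag_get(struct tag_pool *p)
{
	for (int i = 0; i < MAX_TAGS; i++) {
		if (!(p->alloc & (1ULL << i))) {
			p->alloc |= 1ULL << i;
			return i;
		}
	}
	return -1;
}

/* Release a tag; complain (-1) if it was not actually allocated. */
static int tag_put(struct tag_pool *p, int tag)
{
	if (!(p->alloc & (1ULL << tag)))
		return -1;
	p->alloc &= ~(1ULL << tag);
	return 0;
}

int main(void)
{
	struct tag_pool pool = { 0 };
	int a = tag_get(&pool);
	int b = tag_get(&pool);

	printf("claimed tags %d and %d\n", a, b);
	tag_put(&pool, a);
	printf("tag %d comes back as %d after release\n", a, tag_get(&pool));
	return 0;
}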
|
/linux-4.1.27/sound/soc/spear/ |
H A D | spdif_out.c | 44 static void spdif_out_configure(struct spdif_out_dev *host) spdif_out_configure() argument 46 writel(SPDIF_OUT_RESET, host->io_base + SPDIF_OUT_SOFT_RST); spdif_out_configure() 48 writel(readl(host->io_base + SPDIF_OUT_SOFT_RST) & ~SPDIF_OUT_RESET, spdif_out_configure() 49 host->io_base + SPDIF_OUT_SOFT_RST); spdif_out_configure() 54 host->io_base + SPDIF_OUT_CFG); spdif_out_configure() 56 writel(0x7F, host->io_base + SPDIF_OUT_INT_STA_CLR); spdif_out_configure() 57 writel(0x7F, host->io_base + SPDIF_OUT_INT_EN_CLR); spdif_out_configure() 63 struct spdif_out_dev *host = snd_soc_dai_get_drvdata(cpu_dai); spdif_out_startup() local 69 ret = clk_enable(host->clk); spdif_out_startup() 73 host->running = true; spdif_out_startup() 74 spdif_out_configure(host); spdif_out_startup() 82 struct spdif_out_dev *host = snd_soc_dai_get_drvdata(dai); spdif_out_shutdown() local 87 clk_disable(host->clk); spdif_out_shutdown() 88 host->running = false; spdif_out_shutdown() 91 static void spdif_out_clock(struct spdif_out_dev *host, u32 core_freq, spdif_out_clock() argument 96 clk_set_rate(host->clk, core_freq); spdif_out_clock() 97 divider = DIV_ROUND_CLOSEST(clk_get_rate(host->clk), (rate * 128)); spdif_out_clock() 99 ctrl = readl(host->io_base + SPDIF_OUT_CTRL); spdif_out_clock() 102 writel(ctrl, host->io_base + SPDIF_OUT_CTRL); spdif_out_clock() 109 struct spdif_out_dev *host = snd_soc_dai_get_drvdata(dai); spdif_out_hw_params() local 144 spdif_out_clock(host, core_freq, rate); spdif_out_hw_params() 145 host->saved_params.core_freq = core_freq; spdif_out_hw_params() 146 host->saved_params.rate = rate; spdif_out_hw_params() 154 struct spdif_out_dev *host = snd_soc_dai_get_drvdata(dai); spdif_out_trigger() local 165 ctrl = readl(host->io_base + SPDIF_OUT_CTRL); spdif_out_trigger() 167 if (!host->saved_params.mute) spdif_out_trigger() 172 writel(ctrl, host->io_base + SPDIF_OUT_CTRL); spdif_out_trigger() 178 ctrl = readl(host->io_base + SPDIF_OUT_CTRL); spdif_out_trigger() 181 writel(ctrl, host->io_base + SPDIF_OUT_CTRL); spdif_out_trigger() 193 struct spdif_out_dev *host = snd_soc_dai_get_drvdata(dai); spdif_digital_mute() local 196 host->saved_params.mute = mute; spdif_digital_mute() 197 val = readl(host->io_base + SPDIF_OUT_CTRL); spdif_digital_mute() 203 if (host->running) spdif_digital_mute() 209 writel(val, host->io_base + SPDIF_OUT_CTRL); spdif_digital_mute() 217 struct spdif_out_dev *host = snd_soc_dai_get_drvdata(cpu_dai); spdif_mute_get() local 219 ucontrol->value.integer.value[0] = host->saved_params.mute; spdif_mute_get() 227 struct spdif_out_dev *host = snd_soc_dai_get_drvdata(cpu_dai); spdif_mute_put() local 229 if (host->saved_params.mute == ucontrol->value.integer.value[0]) spdif_mute_put() 243 struct spdif_out_dev *host = snd_soc_dai_get_drvdata(dai); spdif_soc_dai_probe() local 245 host->dma_params_tx.filter_data = &host->dma_params; spdif_soc_dai_probe() 246 dai->playback_dma_data = &host->dma_params_tx; spdif_soc_dai_probe() 279 struct spdif_out_dev *host; spdif_out_probe() local 284 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); spdif_out_probe() 285 if (!host) { spdif_out_probe() 291 host->io_base = devm_ioremap_resource(&pdev->dev, res); spdif_out_probe() 292 if (IS_ERR(host->io_base)) spdif_out_probe() 293 return PTR_ERR(host->io_base); spdif_out_probe() 295 host->clk = devm_clk_get(&pdev->dev, NULL); spdif_out_probe() 296 if (IS_ERR(host->clk)) spdif_out_probe() 297 return PTR_ERR(host->clk); spdif_out_probe() 301 host->dma_params.data = 
pdata->dma_params; spdif_out_probe() 302 host->dma_params.addr = res->start + SPDIF_OUT_FIFO_DATA; spdif_out_probe() 303 host->dma_params.max_burst = 16; spdif_out_probe() 304 host->dma_params.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; spdif_out_probe() 306 dev_set_drvdata(&pdev->dev, host); spdif_out_probe() 313 return devm_spear_pcm_platform_register(&pdev->dev, &host->config, spdif_out_probe() 321 struct spdif_out_dev *host = dev_get_drvdata(&pdev->dev); spdif_out_suspend() local 323 if (host->running) spdif_out_suspend() 324 clk_disable(host->clk); spdif_out_suspend() 332 struct spdif_out_dev *host = dev_get_drvdata(&pdev->dev); spdif_out_resume() local 334 if (host->running) { spdif_out_resume() 335 clk_enable(host->clk); spdif_out_resume() 336 spdif_out_configure(host); spdif_out_resume() 337 spdif_out_clock(host, host->saved_params.core_freq, spdif_out_resume() 338 host->saved_params.rate); spdif_out_resume()
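spdif_out_clock() above programs the controller clock to core_freq and then derives the divider with DIV_ROUND_CLOSEST(clk_get_rate(clk), rate * 128); the factor of 128 matches the usual S/PDIF bit clock of 128x the audio sample rate. A standalone sketch of just that arithmetic (the macro body is the positive-operand form of the kernel's DIV_ROUND_CLOSEST):

#include <stdio.h>

/* Round-to-nearest division for positive operands. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

/* S/PDIF bit clock is 128 x the sample rate, so divide the core clock by that. */
static unsigned int spdif_divider(unsigned long core_hz, unsigned int rate)
{
	return DIV_ROUND_CLOSEST(core_hz, (unsigned long)rate * 128);
}

int main(void)
{
	/* e.g. a 12.288 MHz core clock at 48 kHz gives a divider of 2 */
	printf("divider = %u\n", spdif_divider(12288000UL, 48000));
	return 0;
}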
|
H A D | spdif_in.c | 46 static void spdif_in_configure(struct spdif_in_dev *host) spdif_in_configure() argument 52 writel(ctrl, host->io_base + SPDIF_IN_CTRL); spdif_in_configure() 53 writel(0xF, host->io_base + SPDIF_IN_IRQ_MASK); spdif_in_configure() 58 struct spdif_in_dev *host = snd_soc_dai_get_drvdata(dai); spdif_in_dai_probe() local 60 host->dma_params_rx.filter_data = &host->dma_params; spdif_in_dai_probe() 61 dai->capture_dma_data = &host->dma_params_rx; spdif_in_dai_probe() 69 struct spdif_in_dev *host = snd_soc_dai_get_drvdata(dai); spdif_in_shutdown() local 74 writel(0x0, host->io_base + SPDIF_IN_IRQ_MASK); spdif_in_shutdown() 77 static void spdif_in_format(struct spdif_in_dev *host, u32 format) spdif_in_format() argument 79 u32 ctrl = readl(host->io_base + SPDIF_IN_CTRL); spdif_in_format() 91 writel(ctrl, host->io_base + SPDIF_IN_CTRL); spdif_in_format() 98 struct spdif_in_dev *host = snd_soc_dai_get_drvdata(dai); spdif_in_hw_params() local 105 host->saved_params.format = format; spdif_in_hw_params() 113 struct spdif_in_dev *host = snd_soc_dai_get_drvdata(dai); spdif_in_trigger() local 124 clk_enable(host->clk); spdif_in_trigger() 125 spdif_in_configure(host); spdif_in_trigger() 126 spdif_in_format(host, host->saved_params.format); spdif_in_trigger() 128 ctrl = readl(host->io_base + SPDIF_IN_CTRL); spdif_in_trigger() 130 writel(ctrl, host->io_base + SPDIF_IN_CTRL); spdif_in_trigger() 131 writel(0xF, host->io_base + SPDIF_IN_IRQ_MASK); spdif_in_trigger() 137 ctrl = readl(host->io_base + SPDIF_IN_CTRL); spdif_in_trigger() 139 writel(ctrl, host->io_base + SPDIF_IN_CTRL); spdif_in_trigger() 140 writel(0x0, host->io_base + SPDIF_IN_IRQ_MASK); spdif_in_trigger() 142 if (host->reset_perip) spdif_in_trigger() 143 host->reset_perip(); spdif_in_trigger() 144 clk_disable(host->clk); spdif_in_trigger() 180 struct spdif_in_dev *host = (struct spdif_in_dev *)arg; spdif_in_irq() local 182 u32 irq_status = readl(host->io_base + SPDIF_IN_IRQ); spdif_in_irq() 188 dev_err(host->dev, "spdif in: fifo write error"); spdif_in_irq() 190 dev_err(host->dev, "spdif in: empty fifo read error"); spdif_in_irq() 192 dev_err(host->dev, "spdif in: fifo full error"); spdif_in_irq() 194 dev_err(host->dev, "spdif in: out of range error"); spdif_in_irq() 196 writel(0, host->io_base + SPDIF_IN_IRQ); spdif_in_irq() 203 struct spdif_in_dev *host; spdif_in_probe() local 222 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); spdif_in_probe() 223 if (!host) { spdif_in_probe() 228 host->io_base = devm_ioremap(&pdev->dev, res->start, spdif_in_probe() 230 if (!host->io_base) { spdif_in_probe() 235 host->irq = platform_get_irq(pdev, 0); spdif_in_probe() 236 if (host->irq < 0) spdif_in_probe() 239 host->clk = devm_clk_get(&pdev->dev, NULL); spdif_in_probe() 240 if (IS_ERR(host->clk)) spdif_in_probe() 241 return PTR_ERR(host->clk); spdif_in_probe() 248 host->dma_params.data = pdata->dma_params; spdif_in_probe() 249 host->dma_params.addr = res_fifo->start; spdif_in_probe() 250 host->dma_params.max_burst = 16; spdif_in_probe() 251 host->dma_params.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; spdif_in_probe() 252 host->reset_perip = pdata->reset_perip; spdif_in_probe() 254 host->dev = &pdev->dev; spdif_in_probe() 255 dev_set_drvdata(&pdev->dev, host); spdif_in_probe() 257 ret = devm_request_irq(&pdev->dev, host->irq, spdif_in_irq, 0, spdif_in_probe() 258 "spdif-in", host); spdif_in_probe() 269 return devm_spear_pcm_platform_register(&pdev->dev, &host->config, spdif_in_probe()
|
/linux-4.1.27/drivers/usb/usbip/ |
H A D | Makefile | 9 obj-$(CONFIG_USBIP_HOST) += usbip-host.o 10 usbip-host-y := stub_dev.o stub_main.o stub_rx.o stub_tx.o
|
/linux-4.1.27/include/linux/mmc/ |
H A D | slot-gpio.h | 16 int mmc_gpio_get_ro(struct mmc_host *host); 17 int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio); 19 int mmc_gpio_get_cd(struct mmc_host *host); 20 int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio, 23 int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, 26 int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, 29 void mmc_gpio_set_cd_isr(struct mmc_host *host, 31 void mmc_gpiod_request_cd_irq(struct mmc_host *host);
|
H A D | host.h | 2 * linux/include/linux/mmc/host.h 83 * It is optional for the host to implement pre_req and post_req in 90 void (*post_req)(struct mmc_host *host, struct mmc_request *req, 92 void (*pre_req)(struct mmc_host *host, struct mmc_request *req, 94 void (*request)(struct mmc_host *host, struct mmc_request *req); 115 void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios); 116 int (*get_ro)(struct mmc_host *host); 117 int (*get_cd)(struct mmc_host *host); 119 void (*enable_sdio_irq)(struct mmc_host *host, int enable); 122 void (*init_card)(struct mmc_host *host, struct mmc_card *card); 124 int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios); 127 int (*card_busy)(struct mmc_host *host); 130 int (*execute_tuning)(struct mmc_host *host, u32 opcode); 132 /* Prepare HS400 target operating frequency depending host driver */ 133 int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios); 135 void (*hw_reset)(struct mmc_host *host); 136 void (*card_event)(struct mmc_host *host); 165 * Some MMC/SD host controllers implement slot-functions like card and 237 #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */ 243 #define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */ 266 u32 caps2; /* More host capabilities */ 303 /* host specific block data */ 319 unsigned int claimed:1; /* host exclusively claimed */ 322 unsigned int removed:1; /* host is being removed */ 330 struct mmc_card *card; /* device attached to this host */ 333 struct task_struct *claimer; /* task that has host claimed */ 380 int mmc_of_parse(struct mmc_host *host); 382 static inline void *mmc_priv(struct mmc_host *host) mmc_priv() argument 384 return (void *)host->private; mmc_priv() 387 #define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI) 393 int mmc_power_save_host(struct mmc_host *host); 394 int mmc_power_restore_host(struct mmc_host *host); 399 static inline void mmc_signal_sdio_irq(struct mmc_host *host) mmc_signal_sdio_irq() argument 401 host->ops->enable_sdio_irq(host, 0); mmc_signal_sdio_irq() 402 host->sdio_irq_pending = true; mmc_signal_sdio_irq() 403 wake_up_process(host->sdio_irq_thread); mmc_signal_sdio_irq() 406 void sdio_run_irqs(struct mmc_host *host); 431 static inline int mmc_card_is_removable(struct mmc_host *host) mmc_card_is_removable() argument 433 return !(host->caps & MMC_CAP_NONREMOVABLE); mmc_card_is_removable() 436 static inline int mmc_card_keep_power(struct mmc_host *host) mmc_card_keep_power() argument 438 return host->pm_flags & MMC_PM_KEEP_POWER; mmc_card_keep_power() 441 static inline int mmc_card_wake_sdio_irq(struct mmc_host *host) mmc_card_wake_sdio_irq() argument 443 return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ; mmc_card_wake_sdio_irq() 446 static inline int mmc_host_cmd23(struct mmc_host *host) mmc_host_cmd23() argument 448 return host->caps & MMC_CAP_CMD23; mmc_host_cmd23() 451 static inline int mmc_boot_partition_access(struct mmc_host *host) mmc_boot_partition_access() argument 453 return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC); mmc_boot_partition_access() 456 static inline int mmc_host_uhs(struct mmc_host *host) mmc_host_uhs() argument 458 return host->caps & mmc_host_uhs() 464 static inline int mmc_host_packed_wr(struct mmc_host *host) mmc_host_packed_wr() argument 466 return host->caps2 & MMC_CAP2_PACKED_WR; mmc_host_packed_wr() 470 void mmc_host_clk_hold(struct mmc_host *host); 471 void mmc_host_clk_release(struct mmc_host *host); 472 unsigned int mmc_host_clk_rate(struct mmc_host *host); 475 
static inline void mmc_host_clk_hold(struct mmc_host *host) mmc_host_clk_hold() argument 479 static inline void mmc_host_clk_release(struct mmc_host *host) mmc_host_clk_release() argument 483 static inline unsigned int mmc_host_clk_rate(struct mmc_host *host) mmc_host_clk_rate() argument 485 return host->ios.clock; mmc_host_clk_rate() 491 return card->host->ios.timing == MMC_TIMING_SD_HS || mmc_card_hs() 492 card->host->ios.timing == MMC_TIMING_MMC_HS; mmc_card_hs() 497 return card->host->ios.timing >= MMC_TIMING_UHS_SDR12 && mmc_card_uhs() 498 card->host->ios.timing <= MMC_TIMING_UHS_DDR50; mmc_card_uhs() 503 return card->host->ios.timing == MMC_TIMING_MMC_HS200; mmc_card_hs200() 508 return card->host->ios.timing == MMC_TIMING_MMC_DDR52; mmc_card_ddr52() 513 return card->host->ios.timing == MMC_TIMING_MMC_HS400; mmc_card_hs400()
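mmc_priv() above simply returns host->private, i.e. driver-private storage carried in the same allocation as the mmc_host itself; the memstick entry earlier shows the matching allocation side, where memstick_alloc_host() kzallocs sizeof(struct memstick_host) + extra. Below is a standalone sketch of that "public header plus private tail" idiom; struct fake_host and struct driver_priv are illustrative, not the real kernel layouts.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Public part of the host; the driver's private bytes follow it directly. */
struct fake_host {
	int id;
	unsigned long private[];	/* flexible array: driver-private tail */
};

struct driver_priv {
	char name[16];
	int irq;
};

static void *host_priv(struct fake_host *h)
{
	return h->private;		/* points just past the public header */
}

static struct fake_host *alloc_host(size_t extra)
{
	return calloc(1, sizeof(struct fake_host) + extra);
}

int main(void)
{
	struct fake_host *h = alloc_host(sizeof(struct driver_priv));
	struct driver_priv *p = host_priv(h);

	strcpy(p->name, "demo");
	p->irq = 42;
	printf("%s: irq=%d\n", p->name, p->irq);
	free(h);
	return 0;
}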
|
H A D | pm.h | 18 * the host system is being suspended. There are several layers of 19 * abstractions involved, from the host controller driver, to the MMC core 28 #define MMC_PM_WAKE_SDIO_IRQ (1 << 1) /* wake up host system on SDIO IRQ assertion */
|
/linux-4.1.27/drivers/scsi/isci/ |
H A D | Makefile | 4 host.o task.o probe_roms.o \
|
/linux-4.1.27/arch/powerpc/sysdev/ |
H A D | dcr.c | 50 bool dcr_map_ok_generic(dcr_host_t host) dcr_map_ok_generic() argument 52 if (host.type == DCR_HOST_NATIVE) dcr_map_ok_generic() 53 return dcr_map_ok_native(host.host.native); dcr_map_ok_generic() 54 else if (host.type == DCR_HOST_MMIO) dcr_map_ok_generic() 55 return dcr_map_ok_mmio(host.host.mmio); dcr_map_ok_generic() 65 dcr_host_t host; dcr_map_generic() local 69 host.type = DCR_HOST_INVALID; dcr_map_generic() 73 return host; dcr_map_generic() 80 host.type = DCR_HOST_NATIVE; dcr_map_generic() 81 host.host.native = dcr_map_native(dev, dcr_n, dcr_c); dcr_map_generic() 83 host.type = DCR_HOST_MMIO; dcr_map_generic() 84 host.host.mmio = dcr_map_mmio(dev, dcr_n, dcr_c); dcr_map_generic() 88 return host; dcr_map_generic() 92 void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c) dcr_unmap_generic() argument 94 if (host.type == DCR_HOST_NATIVE) dcr_unmap_generic() 95 dcr_unmap_native(host.host.native, dcr_c); dcr_unmap_generic() 96 else if (host.type == DCR_HOST_MMIO) dcr_unmap_generic() 97 dcr_unmap_mmio(host.host.mmio, dcr_c); dcr_unmap_generic() 98 else /* host.type == DCR_HOST_INVALID */ dcr_unmap_generic() 103 u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n) dcr_read_generic() argument 105 if (host.type == DCR_HOST_NATIVE) dcr_read_generic() 106 return dcr_read_native(host.host.native, dcr_n); dcr_read_generic() 107 else if (host.type == DCR_HOST_MMIO) dcr_read_generic() 108 return dcr_read_mmio(host.host.mmio, dcr_n); dcr_read_generic() 109 else /* host.type == DCR_HOST_INVALID */ dcr_read_generic() 115 void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value) dcr_write_generic() argument 117 if (host.type == DCR_HOST_NATIVE) dcr_write_generic() 118 dcr_write_native(host.host.native, dcr_n, value); dcr_write_generic() 119 else if (host.type == DCR_HOST_MMIO) dcr_write_generic() 120 dcr_write_mmio(host.host.mmio, dcr_n, value); dcr_write_generic() 121 else /* host.type == DCR_HOST_INVALID */ dcr_write_generic() 217 void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c) dcr_unmap_mmio() argument 219 dcr_host_mmio_t h = host; dcr_unmap_mmio() 223 h.token += host.base * h.stride; dcr_unmap_mmio()
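dcr_read_generic()/dcr_write_generic() above dispatch at run time on host.type: dcr_host_t carries a type tag plus a union of the native and MMIO handles, and each generic helper forwards to the matching backend. The standalone sketch below shows that tagged-union dispatch; the backend functions only print, and all names are placeholders rather than the powerpc API.

#include <stdint.h>
#include <stdio.h>

enum backend { BK_INVALID, BK_NATIVE, BK_MMIO };

struct native_handle { unsigned int base; };
struct mmio_handle   { uint32_t *token; unsigned int base; };

struct dcr_handle {
	enum backend type;		/* which member of the union is valid */
	union {
		struct native_handle native;
		struct mmio_handle   mmio;
	} h;
};

static uint32_t read_native(struct native_handle n, unsigned int dcr_n)
{
	printf("native read of DCR %u\n", n.base + dcr_n);
	return 0;
}

static uint32_t read_mmio(struct mmio_handle m, unsigned int dcr_n)
{
	printf("mmio read of slot %u\n", m.base + dcr_n);
	return m.token[m.base + dcr_n];
}

/* Generic front end: pick the backend according to the type tag. */
static uint32_t read_generic(struct dcr_handle d, unsigned int dcr_n)
{
	if (d.type == BK_NATIVE)
		return read_native(d.h.native, dcr_n);
	if (d.type == BK_MMIO)
		return read_mmio(d.h.mmio, dcr_n);
	return 0;			/* invalid or unmapped handle */
}

int main(void)
{
	uint32_t regs[8] = { [2] = 0x1234 };
	struct dcr_handle d = { .type = BK_MMIO, .h.mmio = { regs, 0 } };

	printf("value: 0x%x\n", (unsigned)read_generic(d, 2));
	return 0;
}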
|
/linux-4.1.27/drivers/i2c/busses/ |
H A D | i2c-cbus-gpio.c | 40 spinlock_t lock; /* host lock */ 49 * @host: the host we're using 52 static void cbus_send_bit(struct cbus_host *host, unsigned bit) cbus_send_bit() argument 54 gpio_set_value(host->dat_gpio, bit ? 1 : 0); cbus_send_bit() 55 gpio_set_value(host->clk_gpio, 1); cbus_send_bit() 56 gpio_set_value(host->clk_gpio, 0); cbus_send_bit() 61 * @host: the host we're using 65 static void cbus_send_data(struct cbus_host *host, unsigned data, unsigned len) cbus_send_data() argument 70 cbus_send_bit(host, data & (1 << (i - 1))); cbus_send_data() 75 * @host: the host we're using 77 static int cbus_receive_bit(struct cbus_host *host) cbus_receive_bit() argument 81 gpio_set_value(host->clk_gpio, 1); cbus_receive_bit() 82 ret = gpio_get_value(host->dat_gpio); cbus_receive_bit() 83 gpio_set_value(host->clk_gpio, 0); cbus_receive_bit() 89 * @host: the host we're using 91 static int cbus_receive_word(struct cbus_host *host) cbus_receive_word() argument 97 int bit = cbus_receive_bit(host); cbus_receive_word() 110 * @host: the host we're using 116 static int cbus_transfer(struct cbus_host *host, char rw, unsigned dev, cbus_transfer() argument 123 spin_lock_irqsave(&host->lock, flags); cbus_transfer() 126 gpio_set_value(host->sel_gpio, 0); cbus_transfer() 129 gpio_direction_output(host->dat_gpio, 1); cbus_transfer() 132 cbus_send_data(host, dev, CBUS_ADDR_BITS); cbus_transfer() 135 cbus_send_bit(host, rw == I2C_SMBUS_READ); cbus_transfer() 138 cbus_send_data(host, reg, CBUS_REG_BITS); cbus_transfer() 141 cbus_send_data(host, data, 16); cbus_transfer() 144 ret = gpio_direction_input(host->dat_gpio); cbus_transfer() 146 dev_dbg(host->dev, "failed setting direction\n"); cbus_transfer() 149 gpio_set_value(host->clk_gpio, 1); cbus_transfer() 151 ret = cbus_receive_word(host); cbus_transfer() 153 dev_dbg(host->dev, "failed receiving data\n"); cbus_transfer() 159 gpio_set_value(host->sel_gpio, 1); cbus_transfer() 160 gpio_set_value(host->clk_gpio, 1); cbus_transfer() 161 gpio_set_value(host->clk_gpio, 0); cbus_transfer() 164 spin_unlock_irqrestore(&host->lock, flags); cbus_transfer()
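cbus_send_bit()/cbus_send_data() above bit-bang the CBUS protocol over GPIOs: put the bit on the data line, pulse the clock high then low, and shift the word out most-significant bit first. The standalone sketch below keeps only that shift-out loop, with printf() standing in for gpio_set_value() so the bit ordering is visible.

#include <stdio.h>

/* Stand-ins for gpio_set_value() on the data and clock lines. */
static void set_dat(int v) { printf("DAT=%d ", v); }
static void set_clk(int v) { printf("CLK=%d%s", v, v ? " " : "\n"); }

static void send_bit(unsigned int bit)
{
	set_dat(bit ? 1 : 0);
	set_clk(1);			/* clock pulse latches the bit */
	set_clk(0);
}

/* Shift out 'len' bits of 'data', most significant bit first. */
static void send_data(unsigned int data, unsigned int len)
{
	for (unsigned int i = len; i > 0; i--)
		send_bit(data & (1u << (i - 1)));
}

int main(void)
{
	send_data(0xA, 4);		/* emits the bits 1 0 1 0 */
	return 0;
}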
|
/linux-4.1.27/drivers/video/fbdev/ |
H A D | mxsfb.c | 187 #define mxsfb_is_v3(host) (host->devdata->ipversion == 3) 188 #define mxsfb_is_v4(host) (host->devdata->ipversion == 4) 214 static inline u32 set_hsync_pulse_width(struct mxsfb_info *host, unsigned val) set_hsync_pulse_width() argument 216 return (val & host->devdata->hs_wdth_mask) << set_hsync_pulse_width() 217 host->devdata->hs_wdth_shift; set_hsync_pulse_width() 220 static inline u32 get_hsync_pulse_width(struct mxsfb_info *host, unsigned val) get_hsync_pulse_width() argument 222 return (val >> host->devdata->hs_wdth_shift) & get_hsync_pulse_width() 223 host->devdata->hs_wdth_mask; get_hsync_pulse_width() 272 struct mxsfb_info *host = to_imxfb_host(fb_info); mxsfb_check_var() local 290 switch (host->ld_intf_width) { mxsfb_check_var() 319 static inline void mxsfb_enable_axi_clk(struct mxsfb_info *host) mxsfb_enable_axi_clk() argument 321 if (host->clk_axi) mxsfb_enable_axi_clk() 322 clk_prepare_enable(host->clk_axi); mxsfb_enable_axi_clk() 325 static inline void mxsfb_disable_axi_clk(struct mxsfb_info *host) mxsfb_disable_axi_clk() argument 327 if (host->clk_axi) mxsfb_disable_axi_clk() 328 clk_disable_unprepare(host->clk_axi); mxsfb_disable_axi_clk() 333 struct mxsfb_info *host = to_imxfb_host(fb_info); mxsfb_enable_controller() local 337 dev_dbg(&host->pdev->dev, "%s\n", __func__); mxsfb_enable_controller() 339 if (host->reg_lcd) { mxsfb_enable_controller() 340 ret = regulator_enable(host->reg_lcd); mxsfb_enable_controller() 342 dev_err(&host->pdev->dev, mxsfb_enable_controller() 348 if (host->clk_disp_axi) mxsfb_enable_controller() 349 clk_prepare_enable(host->clk_disp_axi); mxsfb_enable_controller() 350 clk_prepare_enable(host->clk); mxsfb_enable_controller() 351 clk_set_rate(host->clk, PICOS2KHZ(fb_info->var.pixclock) * 1000U); mxsfb_enable_controller() 353 mxsfb_enable_axi_clk(host); mxsfb_enable_controller() 356 writel(CTRL_DOTCLK_MODE, host->base + LCDC_CTRL + REG_SET); mxsfb_enable_controller() 359 reg = readl(host->base + LCDC_VDCTRL4); mxsfb_enable_controller() 361 writel(reg, host->base + LCDC_VDCTRL4); mxsfb_enable_controller() 363 writel(CTRL_RUN, host->base + LCDC_CTRL + REG_SET); mxsfb_enable_controller() 365 host->enabled = 1; mxsfb_enable_controller() 370 struct mxsfb_info *host = to_imxfb_host(fb_info); mxsfb_disable_controller() local 375 dev_dbg(&host->pdev->dev, "%s\n", __func__); mxsfb_disable_controller() 381 writel(CTRL_DOTCLK_MODE, host->base + LCDC_CTRL + REG_CLR); mxsfb_disable_controller() 385 reg = readl(host->base + LCDC_CTRL); mxsfb_disable_controller() 391 reg = readl(host->base + LCDC_VDCTRL4); mxsfb_disable_controller() 392 writel(reg & ~VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4); mxsfb_disable_controller() 394 mxsfb_disable_axi_clk(host); mxsfb_disable_controller() 396 clk_disable_unprepare(host->clk); mxsfb_disable_controller() 397 if (host->clk_disp_axi) mxsfb_disable_controller() 398 clk_disable_unprepare(host->clk_disp_axi); mxsfb_disable_controller() 400 host->enabled = 0; mxsfb_disable_controller() 402 if (host->reg_lcd) { mxsfb_disable_controller() 403 ret = regulator_disable(host->reg_lcd); mxsfb_disable_controller() 405 dev_err(&host->pdev->dev, mxsfb_disable_controller() 412 struct mxsfb_info *host = to_imxfb_host(fb_info); mxsfb_set_par() local 430 if (host->enabled) { mxsfb_set_par() 435 mxsfb_enable_axi_clk(host); mxsfb_set_par() 438 writel(CTRL1_FIFO_CLEAR, host->base + LCDC_CTRL1 + REG_SET); mxsfb_set_par() 441 CTRL_SET_BUS_WIDTH(host->ld_intf_width); mxsfb_set_par() 445 dev_dbg(&host->pdev->dev, "Setting 
up RGB565 mode\n"); mxsfb_set_par() 447 writel(CTRL1_SET_BYTE_PACKAGING(0xf), host->base + LCDC_CTRL1); mxsfb_set_par() 450 dev_dbg(&host->pdev->dev, "Setting up RGB888/666 mode\n"); mxsfb_set_par() 452 switch (host->ld_intf_width) { mxsfb_set_par() 454 mxsfb_disable_axi_clk(host); mxsfb_set_par() 455 dev_err(&host->pdev->dev, mxsfb_set_par() 465 writel(CTRL1_SET_BYTE_PACKAGING(0x7), host->base + LCDC_CTRL1); mxsfb_set_par() 468 mxsfb_disable_axi_clk(host); mxsfb_set_par() 469 dev_err(&host->pdev->dev, "Unhandled color depth of %u\n", mxsfb_set_par() 474 writel(ctrl, host->base + LCDC_CTRL); mxsfb_set_par() 478 host->base + host->devdata->transfer_count); mxsfb_set_par() 488 if (host->sync & MXSFB_SYNC_DATA_ENABLE_HIGH_ACT) mxsfb_set_par() 490 if (host->sync & MXSFB_SYNC_DOTCLK_FALLING_ACT) mxsfb_set_par() 493 writel(vdctrl0, host->base + LCDC_VDCTRL0); mxsfb_set_par() 498 host->base + LCDC_VDCTRL1); mxsfb_set_par() 501 writel(set_hsync_pulse_width(host, fb_info->var.hsync_len) | mxsfb_set_par() 505 host->base + LCDC_VDCTRL2); mxsfb_set_par() 511 host->base + LCDC_VDCTRL3); mxsfb_set_par() 514 if (mxsfb_is_v4(host)) mxsfb_set_par() 515 vdctrl4 |= VDCTRL4_SET_DOTCLK_DLY(host->dotclk_delay); mxsfb_set_par() 516 writel(vdctrl4, host->base + LCDC_VDCTRL4); mxsfb_set_par() 520 host->base + host->devdata->next_buf); mxsfb_set_par() 522 mxsfb_disable_axi_clk(host); mxsfb_set_par() 572 struct mxsfb_info *host = to_imxfb_host(fb_info); mxsfb_blank() local 579 if (host->enabled) mxsfb_blank() 584 if (!host->enabled) mxsfb_blank() 594 struct mxsfb_info *host = to_imxfb_host(fb_info); mxsfb_pan_display() local 602 mxsfb_enable_axi_clk(host); mxsfb_pan_display() 606 host->base + host->devdata->next_buf); mxsfb_pan_display() 608 mxsfb_disable_axi_clk(host); mxsfb_pan_display() 625 static int mxsfb_restore_mode(struct mxsfb_info *host, mxsfb_restore_mode() argument 628 struct fb_info *fb_info = &host->fb_info; mxsfb_restore_mode() 635 mxsfb_enable_axi_clk(host); mxsfb_restore_mode() 638 ctrl = readl(host->base + LCDC_CTRL); mxsfb_restore_mode() 644 vdctrl0 = readl(host->base + LCDC_VDCTRL0); mxsfb_restore_mode() 645 vdctrl2 = readl(host->base + LCDC_VDCTRL2); mxsfb_restore_mode() 646 vdctrl3 = readl(host->base + LCDC_VDCTRL3); mxsfb_restore_mode() 647 vdctrl4 = readl(host->base + LCDC_VDCTRL4); mxsfb_restore_mode() 649 transfer_count = readl(host->base + host->devdata->transfer_count); mxsfb_restore_mode() 669 vmode->pixclock = KHZ2PICOS(clk_get_rate(host->clk) / 1000U); mxsfb_restore_mode() 670 vmode->hsync_len = get_hsync_pulse_width(host, vdctrl2); mxsfb_restore_mode() 675 period = readl(host->base + LCDC_VDCTRL1); mxsfb_restore_mode() 695 host->ld_intf_width = CTRL_GET_BUS_WIDTH(ctrl); mxsfb_restore_mode() 696 host->dotclk_delay = VDCTRL4_GET_DOTCLK_DLY(vdctrl4); mxsfb_restore_mode() 700 pa = readl(host->base + host->devdata->cur_buf); mxsfb_restore_mode() 713 writel(fb_info->fix.smem_start, host->base + host->devdata->next_buf); mxsfb_restore_mode() 719 clk_prepare_enable(host->clk); mxsfb_restore_mode() 720 host->enabled = 1; mxsfb_restore_mode() 724 mxsfb_disable_axi_clk(host); mxsfb_restore_mode() 729 static int mxsfb_init_fbinfo_dt(struct mxsfb_info *host, mxsfb_init_fbinfo_dt() argument 732 struct fb_info *fb_info = &host->fb_info; mxsfb_init_fbinfo_dt() 734 struct device *dev = &host->pdev->dev; mxsfb_init_fbinfo_dt() 735 struct device_node *np = host->pdev->dev.of_node; mxsfb_init_fbinfo_dt() 755 host->ld_intf_width = STMLCDIF_8BIT; mxsfb_init_fbinfo_dt() 758 host->ld_intf_width = 
STMLCDIF_16BIT; mxsfb_init_fbinfo_dt() 761 host->ld_intf_width = STMLCDIF_18BIT; mxsfb_init_fbinfo_dt() 764 host->ld_intf_width = STMLCDIF_24BIT; mxsfb_init_fbinfo_dt() 790 host->sync |= MXSFB_SYNC_DATA_ENABLE_HIGH_ACT; mxsfb_init_fbinfo_dt() 792 host->sync |= MXSFB_SYNC_DOTCLK_FALLING_ACT; mxsfb_init_fbinfo_dt() 799 static int mxsfb_init_fbinfo(struct mxsfb_info *host, mxsfb_init_fbinfo() argument 803 struct fb_info *fb_info = &host->fb_info; mxsfb_init_fbinfo() 817 ret = mxsfb_init_fbinfo_dt(host, vmode); mxsfb_init_fbinfo() 838 if (mxsfb_restore_mode(host, vmode)) mxsfb_init_fbinfo() 844 static void mxsfb_free_videomem(struct mxsfb_info *host) mxsfb_free_videomem() argument 846 struct fb_info *fb_info = &host->fb_info; mxsfb_free_videomem() 876 struct mxsfb_info *host; mxsfb_probe() local 895 host = to_imxfb_host(fb_info); mxsfb_probe() 898 host->base = devm_ioremap_resource(&pdev->dev, res); mxsfb_probe() 899 if (IS_ERR(host->base)) { mxsfb_probe() 900 ret = PTR_ERR(host->base); mxsfb_probe() 904 host->pdev = pdev; mxsfb_probe() 905 platform_set_drvdata(pdev, host); mxsfb_probe() 907 host->devdata = &mxsfb_devdata[pdev->id_entry->driver_data]; mxsfb_probe() 909 host->clk = devm_clk_get(&host->pdev->dev, NULL); mxsfb_probe() 910 if (IS_ERR(host->clk)) { mxsfb_probe() 911 ret = PTR_ERR(host->clk); mxsfb_probe() 915 host->clk_axi = devm_clk_get(&host->pdev->dev, "axi"); mxsfb_probe() 916 if (IS_ERR(host->clk_axi)) mxsfb_probe() 917 host->clk_axi = NULL; mxsfb_probe() 919 host->clk_disp_axi = devm_clk_get(&host->pdev->dev, "disp_axi"); mxsfb_probe() 920 if (IS_ERR(host->clk_disp_axi)) mxsfb_probe() 921 host->clk_disp_axi = NULL; mxsfb_probe() 923 host->reg_lcd = devm_regulator_get(&pdev->dev, "lcd"); mxsfb_probe() 924 if (IS_ERR(host->reg_lcd)) mxsfb_probe() 925 host->reg_lcd = NULL; mxsfb_probe() 934 ret = mxsfb_init_fbinfo(host, mode); mxsfb_probe() 951 if (!host->enabled) { mxsfb_probe() 952 mxsfb_enable_axi_clk(host); mxsfb_probe() 953 writel(0, host->base + LCDC_CTRL); mxsfb_probe() 954 mxsfb_disable_axi_clk(host); mxsfb_probe() 964 if (host->enabled) mxsfb_probe() 965 clk_disable_unprepare(host->clk); mxsfb_probe() 975 struct mxsfb_info *host = to_imxfb_host(fb_info); mxsfb_remove() local 977 if (host->enabled) mxsfb_remove() 981 mxsfb_free_videomem(host); mxsfb_remove() 991 struct mxsfb_info *host = to_imxfb_host(fb_info); mxsfb_shutdown() local 993 mxsfb_enable_axi_clk(host); mxsfb_shutdown() 999 writel(CTRL_RUN, host->base + LCDC_CTRL + REG_CLR); mxsfb_shutdown() 1001 mxsfb_disable_axi_clk(host); mxsfb_shutdown()
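The mxsfb.c hits above show the AXI bus clock being handled as optional: probe turns a failed devm_clk_get("axi") into a NULL pointer, and mxsfb_enable_axi_clk()/mxsfb_disable_axi_clk() become no-ops when the clock is absent. Below is a minimal sketch of that idiom; the foo_* names are hypothetical placeholders and deferral handling is skipped, as in the listing.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical driver state; only the optional bus clock matters here. */
struct foo_priv {
	struct clk *clk_axi;	/* NULL when the SoC has no separate gate */
};

static void foo_get_optional_axi_clk(struct platform_device *pdev,
				     struct foo_priv *priv)
{
	/* Treat a missing "axi" clock as "always on", as mxsfb does. */
	priv->clk_axi = devm_clk_get(&pdev->dev, "axi");
	if (IS_ERR(priv->clk_axi))
		priv->clk_axi = NULL;
}

static void foo_bus_clk_on(struct foo_priv *priv)
{
	if (priv->clk_axi)
		clk_prepare_enable(priv->clk_axi);
}

static void foo_bus_clk_off(struct foo_priv *priv)
{
	if (priv->clk_axi)
		clk_disable_unprepare(priv->clk_axi);
}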
|
/linux-4.1.27/drivers/gpu/drm/msm/dsi/ |
H A D | dsi.h | 48 struct mipi_dsi_host *host; member in struct:msm_dsi 76 /* dsi host */ 77 int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host, 79 void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host, 81 int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host, 83 int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host, 85 void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, 87 int msm_dsi_host_enable(struct mipi_dsi_host *host); 88 int msm_dsi_host_disable(struct mipi_dsi_host *host); 89 int msm_dsi_host_power_on(struct mipi_dsi_host *host); 90 int msm_dsi_host_power_off(struct mipi_dsi_host *host); 91 int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, 93 struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host, 95 int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer); 96 void msm_dsi_host_unregister(struct mipi_dsi_host *host); 97 void msm_dsi_host_destroy(struct mipi_dsi_host *host); 98 int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
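dsi.h above only declares the msm_dsi_host_* entry points; the dsi_manager.c hits in the next entry show the order they are called in when a bridge comes up. Below is a hedged sketch of that order, trimmed to power-on and enable with the matching unwind; the example_* wrapper is hypothetical and msm_dsi is assumed to come from the surrounding driver.

#include "dsi.h"	/* driver-internal header declaring these entry points */

static int example_dsi_pre_enable(struct msm_dsi *msm_dsi)
{
	struct mipi_dsi_host *host = msm_dsi->host;
	int ret;

	/* Mode programming happens earlier, from the bridge mode_set hook. */
	ret = msm_dsi_host_power_on(host);
	if (ret)
		return ret;

	ret = msm_dsi_host_enable(host);
	if (ret)
		msm_dsi_host_power_off(host);	/* unwind on failure */

	return ret;
}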
|
H A D | dsi_manager.c | 99 msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host, dsi_mgr_connector_detect() 110 other_dsi->host, NULL); dsi_mgr_connector_detect() 282 struct mipi_dsi_host *host = msm_dsi->host; dsi_mgr_bridge_pre_enable() local 291 ret = msm_dsi_host_power_on(host); dsi_mgr_bridge_pre_enable() 293 pr_err("%s: power on host %d failed, %d\n", __func__, id, ret); dsi_mgr_bridge_pre_enable() 298 ret = msm_dsi_host_power_on(msm_dsi1->host); dsi_mgr_bridge_pre_enable() 315 ret = msm_dsi_host_enable(host); dsi_mgr_bridge_pre_enable() 317 pr_err("%s: enable host %d failed, %d\n", __func__, id, ret); dsi_mgr_bridge_pre_enable() 322 ret = msm_dsi_host_enable(msm_dsi1->host); dsi_mgr_bridge_pre_enable() 339 msm_dsi_host_disable(msm_dsi1->host); dsi_mgr_bridge_pre_enable() 341 msm_dsi_host_disable(host); dsi_mgr_bridge_pre_enable() 346 msm_dsi_host_power_off(msm_dsi1->host); dsi_mgr_bridge_pre_enable() 348 msm_dsi_host_power_off(host); dsi_mgr_bridge_pre_enable() 368 struct mipi_dsi_host *host = msm_dsi->host; dsi_mgr_bridge_post_disable() local 382 ret = msm_dsi_host_disable(host); dsi_mgr_bridge_post_disable() 384 pr_err("%s: host %d disable failed, %d\n", __func__, id, ret); dsi_mgr_bridge_post_disable() 387 ret = msm_dsi_host_disable(msm_dsi1->host); dsi_mgr_bridge_post_disable() 396 ret = msm_dsi_host_power_off(host); dsi_mgr_bridge_post_disable() 398 pr_err("%s: host %d power off failed,%d\n", __func__, id, ret); dsi_mgr_bridge_post_disable() 401 ret = msm_dsi_host_power_off(msm_dsi1->host); dsi_mgr_bridge_post_disable() 415 struct mipi_dsi_host *host = msm_dsi->host; dsi_mgr_bridge_mode_set() local 430 msm_dsi_host_set_display_mode(host, adjusted_mode); dsi_mgr_bridge_mode_set() 432 msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode); dsi_mgr_bridge_mode_set() 486 * when panel is attached to the host. msm_dsi_manager_connector_init() 592 struct mipi_dsi_host *host = msm_dsi->host; msm_dsi_manager_cmd_xfer() local 609 ret = msm_dsi_host_xfer_prepare(msm_dsi0->host, msg); msm_dsi_manager_cmd_xfer() 611 pr_err("%s: failed to prepare non-trigger host, %d\n", msm_dsi_manager_cmd_xfer() 616 ret = msm_dsi_host_xfer_prepare(host, msg); msm_dsi_manager_cmd_xfer() 618 pr_err("%s: failed to prepare host, %d\n", __func__, ret); msm_dsi_manager_cmd_xfer() 622 ret = is_read ? msm_dsi_host_cmd_rx(host, msg) : msm_dsi_manager_cmd_xfer() 623 msm_dsi_host_cmd_tx(host, msg); msm_dsi_manager_cmd_xfer() 625 msm_dsi_host_xfer_restore(host, msg); msm_dsi_manager_cmd_xfer() 629 msm_dsi_host_xfer_restore(msm_dsi0->host, msg); msm_dsi_manager_cmd_xfer() 638 struct mipi_dsi_host *host = msm_dsi->host; msm_dsi_manager_cmd_xfer_trigger() local 644 msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, iova, len); msm_dsi_manager_cmd_xfer_trigger() 646 msm_dsi_host_cmd_xfer_commit(host, iova, len); msm_dsi_manager_cmd_xfer_trigger() 677 ret = msm_dsi_host_register(msm_dsi->host, true); msm_dsi_manager_register() 685 /* Register slave host first, so that slave DSI device msm_dsi_manager_register() 688 * Also, do not check defer for the slave host, msm_dsi_manager_register() 692 ret = msm_dsi_host_register(sdsi->host, false); msm_dsi_manager_register() 695 ret = msm_dsi_host_register(mdsi->host, true); msm_dsi_manager_register() 705 if (msm_dsi->host) msm_dsi_manager_unregister() 706 msm_dsi_host_unregister(msm_dsi->host); msm_dsi_manager_unregister()
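One detail worth pulling out of the dsi_manager.c hits is the dual-DSI registration order: the comment notes that the slave host is registered first, and without the defer check, before the master. A small sketch of just that ordering; sdsi and mdsi are assumed to be the slave and master msm_dsi instances the manager has already picked out.

#include "dsi.h"	/* driver-internal header for msm_dsi_host_register() */

static int example_register_dual_dsi(struct msm_dsi *sdsi, struct msm_dsi *mdsi)
{
	int ret;

	/* Slave first, and never defer on it. */
	ret = msm_dsi_host_register(sdsi->host, false);
	if (ret)
		return ret;

	/* Master last; this one may ask for probe deferral. */
	return msm_dsi_host_register(mdsi->host, true);
}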
|
/linux-4.1.27/include/linux/platform_data/ |
H A D | usb-pxa3xx-ulpi.h | 28 /* Start PXA3xx U2D host */ 29 int pxa3xx_u2d_start_hc(struct usb_bus *host); 30 /* Stop PXA3xx U2D host */ 31 void pxa3xx_u2d_stop_hc(struct usb_bus *host);
|
H A D | mmc-davinci.h | 9 #include <linux/mmc/host.h> 23 /* any additional host capabilities: OR'd in to mmc->f_caps */
|
H A D | usb-rcar-gen2-phy.h | 17 bool chan0_pci:1; /* true: PCI USB host 0, false: USBHS */ 19 bool chan2_pci:1; /* true: PCI USB host 2, false: USBSS */
|
H A D | pxa2xx_udc.h | 13 int (*udc_is_connected)(void); /* do we see host? */ 15 #define PXA2XX_UDC_CMD_CONNECT 0 /* let host see us */ 16 #define PXA2XX_UDC_CMD_DISCONNECT 1 /* so host won't see us */
|
H A D | usb-mx2.h | 27 enum mx21_usbh_xcvr host_xcvr; /* transceiver mode host 1,2 ports */ 28 enum mx21_usbh_xcvr otg_xcvr; /* transceiver mode otg (as host) port */ 31 enable_otg_host:1, /* enable "OTG" port (as host) */
|
/linux-4.1.27/arch/powerpc/platforms/powermac/ |
H A D | low_i2c.c | 185 name, __kw_state_names[host->state], isr); \ 197 static inline u8 __kw_read_reg(struct pmac_i2c_host_kw *host, reg_t reg) __kw_read_reg() argument 199 return readb(host->base + (((unsigned int)reg) << host->bsteps)); __kw_read_reg() 202 static inline void __kw_write_reg(struct pmac_i2c_host_kw *host, __kw_write_reg() argument 205 writeb(val, host->base + (((unsigned)reg) << host->bsteps)); __kw_write_reg() 206 (void)__kw_read_reg(host, reg_subaddr); __kw_write_reg() 209 #define kw_write_reg(reg, val) __kw_write_reg(host, reg, val) 210 #define kw_read_reg(reg) __kw_read_reg(host, reg) 212 static u8 kw_i2c_wait_interrupt(struct pmac_i2c_host_kw *host) kw_i2c_wait_interrupt() argument 226 if (host->polled) { kw_i2c_wait_interrupt() 235 static void kw_i2c_do_stop(struct pmac_i2c_host_kw *host, int result) kw_i2c_do_stop() argument 238 host->state = state_stop; kw_i2c_do_stop() 239 host->result = result; kw_i2c_do_stop() 243 static void kw_i2c_handle_interrupt(struct pmac_i2c_host_kw *host, u8 isr) kw_i2c_handle_interrupt() argument 248 __kw_state_names[host->state], isr); kw_i2c_handle_interrupt() 250 if (host->state == state_idle) { kw_i2c_handle_interrupt() 260 if (host->state != state_stop) { kw_i2c_handle_interrupt() 261 kw_i2c_do_stop(host, -EIO); kw_i2c_handle_interrupt() 267 host->state = state_idle; kw_i2c_handle_interrupt() 269 if (!host->polled) kw_i2c_handle_interrupt() 270 complete(&host->complete); kw_i2c_handle_interrupt() 276 if (host->state != state_addr) { kw_i2c_handle_interrupt() 278 kw_i2c_do_stop(host, -EIO); kw_i2c_handle_interrupt() 281 host->result = -ENXIO; kw_i2c_handle_interrupt() 282 host->state = state_stop; kw_i2c_handle_interrupt() 285 if (host->len == 0) kw_i2c_handle_interrupt() 286 kw_i2c_do_stop(host, 0); kw_i2c_handle_interrupt() 287 else if (host->rw) { kw_i2c_handle_interrupt() 288 host->state = state_read; kw_i2c_handle_interrupt() 289 if (host->len > 1) kw_i2c_handle_interrupt() 293 host->state = state_write; kw_i2c_handle_interrupt() 294 kw_write_reg(reg_data, *(host->data++)); kw_i2c_handle_interrupt() 295 host->len--; kw_i2c_handle_interrupt() 302 if (host->state == state_read) { kw_i2c_handle_interrupt() 303 *(host->data++) = kw_read_reg(reg_data); kw_i2c_handle_interrupt() 304 host->len--; kw_i2c_handle_interrupt() 306 if (host->len == 0) kw_i2c_handle_interrupt() 307 host->state = state_stop; kw_i2c_handle_interrupt() 308 else if (host->len == 1) kw_i2c_handle_interrupt() 310 } else if (host->state == state_write) { kw_i2c_handle_interrupt() 314 host->result = -EFBIG; kw_i2c_handle_interrupt() 315 host->state = state_stop; kw_i2c_handle_interrupt() 316 } else if (host->len) { kw_i2c_handle_interrupt() 317 kw_write_reg(reg_data, *(host->data++)); kw_i2c_handle_interrupt() 318 host->len--; kw_i2c_handle_interrupt() 320 kw_i2c_do_stop(host, 0); kw_i2c_handle_interrupt() 323 if (host->state != state_stop) kw_i2c_handle_interrupt() 324 kw_i2c_do_stop(host, -EIO); kw_i2c_handle_interrupt() 331 if (host->state != state_stop) { kw_i2c_handle_interrupt() 333 host->result = -EIO; kw_i2c_handle_interrupt() 335 host->state = state_idle; kw_i2c_handle_interrupt() 336 if (!host->polled) kw_i2c_handle_interrupt() 337 complete(&host->complete); kw_i2c_handle_interrupt() 349 struct pmac_i2c_host_kw *host = dev_id; kw_i2c_irq() local 352 spin_lock_irqsave(&host->lock, flags); kw_i2c_irq() 353 del_timer(&host->timeout_timer); kw_i2c_irq() 354 kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr)); kw_i2c_irq() 355 if (host->state != state_idle) { 
kw_i2c_irq() 356 host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT; kw_i2c_irq() 357 add_timer(&host->timeout_timer); kw_i2c_irq() 359 spin_unlock_irqrestore(&host->lock, flags); kw_i2c_irq() 365 struct pmac_i2c_host_kw *host = (struct pmac_i2c_host_kw *)data; kw_i2c_timeout() local 368 spin_lock_irqsave(&host->lock, flags); kw_i2c_timeout() 374 if (timer_pending(&host->timeout_timer)) kw_i2c_timeout() 377 kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr)); kw_i2c_timeout() 378 if (host->state != state_idle) { kw_i2c_timeout() 379 host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT; kw_i2c_timeout() 380 add_timer(&host->timeout_timer); kw_i2c_timeout() 383 spin_unlock_irqrestore(&host->lock, flags); kw_i2c_timeout() 388 struct pmac_i2c_host_kw *host = bus->hostdata; kw_i2c_open() local 389 mutex_lock(&host->mutex); kw_i2c_open() 395 struct pmac_i2c_host_kw *host = bus->hostdata; kw_i2c_close() local 396 mutex_unlock(&host->mutex); kw_i2c_close() 402 struct pmac_i2c_host_kw *host = bus->hostdata; kw_i2c_xfer() local 403 u8 mode_reg = host->speed; kw_i2c_xfer() 404 int use_irq = host->irq != NO_IRQ && !bus->polled; kw_i2c_xfer() 443 host->data = data; kw_i2c_xfer() 444 host->len = len; kw_i2c_xfer() 445 host->state = state_addr; kw_i2c_xfer() 446 host->result = 0; kw_i2c_xfer() 447 host->rw = (addrdir & 1); kw_i2c_xfer() 448 host->polled = bus->polled; kw_i2c_xfer() 455 reinit_completion(&host->complete); kw_i2c_xfer() 459 host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT; kw_i2c_xfer() 460 add_timer(&host->timeout_timer); kw_i2c_xfer() 470 wait_for_completion(&host->complete); kw_i2c_xfer() 472 while(host->state != state_idle) { kw_i2c_xfer() 475 u8 isr = kw_i2c_wait_interrupt(host); kw_i2c_xfer() 476 spin_lock_irqsave(&host->lock, flags); kw_i2c_xfer() 477 kw_i2c_handle_interrupt(host, isr); kw_i2c_xfer() 478 spin_unlock_irqrestore(&host->lock, flags); kw_i2c_xfer() 485 return host->result; kw_i2c_xfer() 490 struct pmac_i2c_host_kw *host; kw_i2c_host_init() local 494 host = kzalloc(sizeof(struct pmac_i2c_host_kw), GFP_KERNEL); kw_i2c_host_init() 495 if (host == NULL) { kw_i2c_host_init() 496 printk(KERN_ERR "low_i2c: Can't allocate host for %s\n", kw_i2c_host_init() 509 kfree(host); kw_i2c_host_init() 512 mutex_init(&host->mutex); kw_i2c_host_init() 513 init_completion(&host->complete); kw_i2c_host_init() 514 spin_lock_init(&host->lock); kw_i2c_host_init() 515 init_timer(&host->timeout_timer); kw_i2c_host_init() 516 host->timeout_timer.function = kw_i2c_timeout; kw_i2c_host_init() 517 host->timeout_timer.data = (unsigned long)host; kw_i2c_host_init() 521 for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++) kw_i2c_host_init() 524 host->speed = KW_I2C_MODE_25KHZ; kw_i2c_host_init() 528 host->speed = KW_I2C_MODE_100KHZ; kw_i2c_host_init() 531 host->speed = KW_I2C_MODE_50KHZ; kw_i2c_host_init() 534 host->speed = KW_I2C_MODE_25KHZ; kw_i2c_host_init() 537 host->irq = irq_of_parse_and_map(np, 0); kw_i2c_host_init() 538 if (host->irq == NO_IRQ) kw_i2c_host_init() 543 host->base = ioremap((*addrp), 0x1000); kw_i2c_host_init() 544 if (host->base == NULL) { kw_i2c_host_init() 547 kfree(host); kw_i2c_host_init() 558 if (request_irq(host->irq, kw_i2c_irq, IRQF_NO_SUSPEND, kw_i2c_host_init() 559 "keywest i2c", host)) kw_i2c_host_init() 560 host->irq = NO_IRQ; kw_i2c_host_init() 563 *addrp, host->irq, np->full_name); kw_i2c_host_init() 565 return host; kw_i2c_host_init() 569 static void __init kw_i2c_add(struct pmac_i2c_host_kw *host, kw_i2c_add() argument 583 bus->hostdata = 
host; kw_i2c_add() 604 struct pmac_i2c_host_kw *host; kw_i2c_probe() local 607 /* Found one, init a host structure */ kw_i2c_probe() 608 host = kw_i2c_host_init(np); kw_i2c_probe() 609 if (host == NULL) kw_i2c_probe() 634 kw_i2c_add(host, np, np, i); kw_i2c_probe() 642 kw_i2c_add(host, np, child, *reg); kw_i2c_probe()
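Every register access in the Keywest i2c hits goes through __kw_read_reg()/__kw_write_reg(), which scale the register index by a per-host shift (bsteps) that kw_i2c_host_init() derives from the device's register spacing. A simplified sketch of that addressing, using a placeholder kw_host_example structure instead of the real pmac_i2c_host_kw; the posting read of the sub-address register is reduced to a comment.

#include <linux/io.h>
#include <linux/types.h>

struct kw_host_example {
	void __iomem *base;
	unsigned int bsteps;	/* log2 of the register spacing */
};

static u8 kw_read_example(struct kw_host_example *host, unsigned int reg)
{
	return readb(host->base + (reg << host->bsteps));
}

static void kw_write_example(struct kw_host_example *host, unsigned int reg,
			     u8 val)
{
	writeb(val, host->base + (reg << host->bsteps));
	/* The real driver reads back reg_subaddr here to post the write. */
}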
|
/linux-4.1.27/drivers/scsi/ufs/ |
H A D | ufs-qcom.c | 29 static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host, 31 static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote); 76 static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host) ufs_qcom_disable_lane_clks() argument 78 if (!host->is_lane_clks_enabled) ufs_qcom_disable_lane_clks() 81 clk_disable_unprepare(host->tx_l1_sync_clk); ufs_qcom_disable_lane_clks() 82 clk_disable_unprepare(host->tx_l0_sync_clk); ufs_qcom_disable_lane_clks() 83 clk_disable_unprepare(host->rx_l1_sync_clk); ufs_qcom_disable_lane_clks() 84 clk_disable_unprepare(host->rx_l0_sync_clk); ufs_qcom_disable_lane_clks() 86 host->is_lane_clks_enabled = false; ufs_qcom_disable_lane_clks() 89 static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host) ufs_qcom_enable_lane_clks() argument 92 struct device *dev = host->hba->dev; ufs_qcom_enable_lane_clks() 94 if (host->is_lane_clks_enabled) ufs_qcom_enable_lane_clks() 98 host->rx_l0_sync_clk); ufs_qcom_enable_lane_clks() 103 host->tx_l0_sync_clk); ufs_qcom_enable_lane_clks() 108 host->rx_l1_sync_clk); ufs_qcom_enable_lane_clks() 113 host->tx_l1_sync_clk); ufs_qcom_enable_lane_clks() 117 host->is_lane_clks_enabled = true; ufs_qcom_enable_lane_clks() 121 clk_disable_unprepare(host->rx_l1_sync_clk); ufs_qcom_enable_lane_clks() 123 clk_disable_unprepare(host->tx_l0_sync_clk); ufs_qcom_enable_lane_clks() 125 clk_disable_unprepare(host->rx_l0_sync_clk); ufs_qcom_enable_lane_clks() 130 static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host) ufs_qcom_init_lane_clks() argument 133 struct device *dev = host->hba->dev; ufs_qcom_init_lane_clks() 136 "rx_lane0_sync_clk", &host->rx_l0_sync_clk); ufs_qcom_init_lane_clks() 141 "tx_lane0_sync_clk", &host->tx_l0_sync_clk); ufs_qcom_init_lane_clks() 146 &host->rx_l1_sync_clk); ufs_qcom_init_lane_clks() 151 &host->tx_l1_sync_clk); ufs_qcom_init_lane_clks() 158 struct ufs_qcom_host *host = hba->priv; ufs_qcom_link_startup_post_change() local 159 struct phy *phy = host->generic_phy; ufs_qcom_link_startup_post_change() 214 struct ufs_qcom_host *host = hba->priv; ufs_qcom_power_up_sequence() local 215 struct phy *phy = host->generic_phy; ufs_qcom_power_up_sequence() 276 struct ufs_qcom_host *host = hba->priv; ufs_qcom_hce_enable_notify() local 287 err = ufs_qcom_enable_lane_clks(host); ufs_qcom_hce_enable_notify() 440 struct ufs_qcom_host *host = hba->priv; ufs_qcom_suspend() local 441 struct phy *phy = host->generic_phy; ufs_qcom_suspend() 450 ufs_qcom_disable_lane_clks(host); ufs_qcom_suspend() 471 struct ufs_qcom_host *host = hba->priv; ufs_qcom_resume() local 472 struct phy *phy = host->generic_phy; ufs_qcom_resume() 586 static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host) ufs_qcom_update_bus_bw_vote() argument 592 ufs_qcom_get_speed_mode(&host->dev_req_params, mode); ufs_qcom_update_bus_bw_vote() 594 vote = ufs_qcom_get_bus_vote(host, mode); ufs_qcom_update_bus_bw_vote() 596 err = ufs_qcom_set_bus_vote(host, vote); ufs_qcom_update_bus_bw_vote() 601 dev_err(host->hba->dev, "%s: failed %d\n", __func__, err); ufs_qcom_update_bus_bw_vote() 603 host->bus_vote.saved_vote = vote; ufs_qcom_update_bus_bw_vote() 613 struct ufs_qcom_host *host = hba->priv; ufs_qcom_pwr_change_notify() local 614 struct phy *phy = host->generic_phy; ufs_qcom_pwr_change_notify() 674 memcpy(&host->dev_req_params, ufs_qcom_pwr_change_notify() 676 ufs_qcom_update_bus_bw_vote(host); ufs_qcom_pwr_change_notify() 688 * @hba: host controller instance 690 * QCOM UFS host controller might have some non standard 
behaviours (quirks) 692 * quirks to standard UFS host controller driver so standard takes them into 697 struct ufs_qcom_host *host = hba->priv; ufs_qcom_advertise_quirks() local 699 if (host->hw_ver.major == 0x1) ufs_qcom_advertise_quirks() 702 if (host->hw_ver.major >= 0x2) { ufs_qcom_advertise_quirks() 703 if (!ufs_qcom_cap_qunipro(host)) ufs_qcom_advertise_quirks() 711 struct ufs_qcom_host *host = hba->priv; ufs_qcom_set_caps() local 713 if (host->hw_ver.major >= 0x2) ufs_qcom_set_caps() 714 host->caps = UFS_QCOM_CAP_QUNIPRO; ufs_qcom_set_caps() 717 static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host, ufs_qcom_get_bus_vote() argument 720 struct device *dev = host->hba->dev; ufs_qcom_get_bus_vote() 730 if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN")) ufs_qcom_get_bus_vote() 742 static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote) ufs_qcom_set_bus_vote() argument 746 if (vote != host->bus_vote.curr_vote) ufs_qcom_set_bus_vote() 747 host->bus_vote.curr_vote = vote; ufs_qcom_set_bus_vote() 782 struct ufs_qcom_host *host = hba->priv; ufs_qcom_setup_clocks() local 791 if (!host) ufs_qcom_setup_clocks() 795 err = ufs_qcom_phy_enable_iface_clk(host->generic_phy); ufs_qcom_setup_clocks() 799 err = ufs_qcom_phy_enable_ref_clk(host->generic_phy); ufs_qcom_setup_clocks() 803 ufs_qcom_phy_disable_iface_clk(host->generic_phy); ufs_qcom_setup_clocks() 807 ufs_qcom_phy_enable_dev_ref_clk(host->generic_phy); ufs_qcom_setup_clocks() 808 vote = host->bus_vote.saved_vote; ufs_qcom_setup_clocks() 809 if (vote == host->bus_vote.min_bw_vote) ufs_qcom_setup_clocks() 810 ufs_qcom_update_bus_bw_vote(host); ufs_qcom_setup_clocks() 813 ufs_qcom_phy_disable_iface_clk(host->generic_phy); ufs_qcom_setup_clocks() 816 ufs_qcom_phy_disable_ref_clk(host->generic_phy); ufs_qcom_setup_clocks() 818 ufs_qcom_phy_disable_dev_ref_clk(host->generic_phy); ufs_qcom_setup_clocks() 820 vote = host->bus_vote.min_bw_vote; ufs_qcom_setup_clocks() 823 err = ufs_qcom_set_bus_vote(host, vote); ufs_qcom_setup_clocks() 837 struct ufs_qcom_host *host = hba->priv; show_ufs_to_mem_max_bus_bw() local 840 host->bus_vote.is_max_bw_needed); show_ufs_to_mem_max_bus_bw() 848 struct ufs_qcom_host *host = hba->priv; store_ufs_to_mem_max_bus_bw() local 852 host->bus_vote.is_max_bw_needed = !!value; store_ufs_to_mem_max_bus_bw() 853 ufs_qcom_update_bus_bw_vote(host); store_ufs_to_mem_max_bus_bw() 859 static int ufs_qcom_bus_register(struct ufs_qcom_host *host) ufs_qcom_bus_register() argument 862 struct device *dev = host->hba->dev; ufs_qcom_bus_register() 873 host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN"); ufs_qcom_bus_register() 874 host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX"); ufs_qcom_bus_register() 876 host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw; ufs_qcom_bus_register() 877 host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw; ufs_qcom_bus_register() 878 sysfs_attr_init(&host->bus_vote.max_bus_bw.attr); ufs_qcom_bus_register() 879 host->bus_vote.max_bus_bw.attr.name = "max_bus_bw"; ufs_qcom_bus_register() 880 host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR; ufs_qcom_bus_register() 881 err = device_create_file(dev, &host->bus_vote.max_bus_bw); ufs_qcom_bus_register() 897 * @hba: host controller instance 909 struct ufs_qcom_host *host; ufs_qcom_init() local 914 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); ufs_qcom_init() 915 if (!host) { ufs_qcom_init() 917 dev_err(dev, "%s: no memory for qcom ufs host\n", __func__); 
ufs_qcom_init() 921 host->hba = hba; ufs_qcom_init() 922 hba->priv = (void *)host; ufs_qcom_init() 924 host->generic_phy = devm_phy_get(dev, "ufsphy"); ufs_qcom_init() 926 if (IS_ERR(host->generic_phy)) { ufs_qcom_init() 927 err = PTR_ERR(host->generic_phy); ufs_qcom_init() 932 err = ufs_qcom_bus_register(host); ufs_qcom_init() 936 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major, ufs_qcom_init() 937 &host->hw_ver.minor, &host->hw_ver.step); ufs_qcom_init() 940 ufs_qcom_phy_save_controller_version(host->generic_phy, ufs_qcom_init() 941 host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step); ufs_qcom_init() 943 phy_init(host->generic_phy); ufs_qcom_init() 944 err = phy_power_on(host->generic_phy); ufs_qcom_init() 948 err = ufs_qcom_init_lane_clks(host); ufs_qcom_init() 961 ufs_qcom_hosts[hba->dev->id] = host; ufs_qcom_init() 966 phy_power_off(host->generic_phy); ufs_qcom_init() 968 phy_exit(host->generic_phy); ufs_qcom_init() 970 devm_kfree(dev, host); ufs_qcom_init() 978 struct ufs_qcom_host *host = hba->priv; ufs_qcom_exit() local 980 ufs_qcom_disable_lane_clks(host); ufs_qcom_exit() 981 phy_power_off(host->generic_phy); ufs_qcom_exit() 987 struct ufs_qcom_host *host = hba->priv; ufs_qcom_clk_scale_notify() local 988 struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params; ufs_qcom_clk_scale_notify()
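ufs_qcom_enable_lane_clks() above enables the four lane clocks in sequence and releases the ones already running when a later clock fails. The sketch below reproduces that unwind pattern with plain clk_prepare_enable()/clk_disable_unprepare(); the driver's own enable wrapper and its error reporting are dropped, and the ufs_qcom_host fields follow the listing.

#include <linux/clk.h>
#include "ufs-qcom.h"	/* driver-internal header defining struct ufs_qcom_host */

static int example_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err;

	if (host->is_lane_clks_enabled)
		return 0;

	err = clk_prepare_enable(host->rx_l0_sync_clk);
	if (err)
		return err;

	err = clk_prepare_enable(host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	err = clk_prepare_enable(host->rx_l1_sync_clk);
	if (err)
		goto disable_tx_l0;

	err = clk_prepare_enable(host->tx_l1_sync_clk);
	if (err)
		goto disable_rx_l1;

	host->is_lane_clks_enabled = true;
	return 0;

disable_rx_l1:
	clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
	return err;
}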
|
/linux-4.1.27/drivers/scsi/arcmsr/ |
H A D | arcmsr_attr.c | 6 ** Description: attributes exported to sysfs and device host 70 struct Scsi_Host *host = class_to_shost(dev); arcmsr_sysfs_iop_message_read() local 71 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; arcmsr_sysfs_iop_message_read() 117 struct Scsi_Host *host = class_to_shost(dev); arcmsr_sysfs_iop_message_write() local 118 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; arcmsr_sysfs_iop_message_write() 165 struct Scsi_Host *host = class_to_shost(dev); arcmsr_sysfs_iop_message_clear() local 166 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; arcmsr_sysfs_iop_message_clear() 222 struct Scsi_Host *host = acb->host; arcmsr_alloc_sysfs_attr() local 225 error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr); arcmsr_alloc_sysfs_attr() 230 error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr); arcmsr_alloc_sysfs_attr() 235 error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_clear_attr); arcmsr_alloc_sysfs_attr() 242 sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr); arcmsr_alloc_sysfs_attr() 244 sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr); arcmsr_alloc_sysfs_attr() 251 struct Scsi_Host *host = acb->host; arcmsr_free_sysfs_attr() local 253 sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_clear_attr); arcmsr_free_sysfs_attr() 254 sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr); arcmsr_free_sysfs_attr() 255 sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr); arcmsr_free_sysfs_attr() 272 struct Scsi_Host *host = class_to_shost(dev); arcmsr_attr_host_driver_posted_cmd() local 274 (struct AdapterControlBlock *) host->hostdata; arcmsr_attr_host_driver_posted_cmd() 284 struct Scsi_Host *host = class_to_shost(dev); arcmsr_attr_host_driver_reset() local 286 (struct AdapterControlBlock *) host->hostdata; arcmsr_attr_host_driver_reset() 296 struct Scsi_Host *host = class_to_shost(dev); arcmsr_attr_host_driver_abort() local 298 (struct AdapterControlBlock *) host->hostdata; arcmsr_attr_host_driver_abort() 308 struct Scsi_Host *host = class_to_shost(dev); arcmsr_attr_host_fw_model() local 310 (struct AdapterControlBlock *) host->hostdata; arcmsr_attr_host_fw_model() 320 struct Scsi_Host *host = class_to_shost(dev); arcmsr_attr_host_fw_version() local 322 (struct AdapterControlBlock *) host->hostdata; arcmsr_attr_host_fw_version() 333 struct Scsi_Host *host = class_to_shost(dev); arcmsr_attr_host_fw_request_len() local 335 (struct AdapterControlBlock *) host->hostdata; arcmsr_attr_host_fw_request_len() 346 struct Scsi_Host *host = class_to_shost(dev); arcmsr_attr_host_fw_numbers_queue() local 348 (struct AdapterControlBlock *) host->hostdata; arcmsr_attr_host_fw_numbers_queue() 359 struct Scsi_Host *host = class_to_shost(dev); arcmsr_attr_host_fw_sdram_size() local 361 (struct AdapterControlBlock *) host->hostdata; arcmsr_attr_host_fw_sdram_size() 372 struct Scsi_Host *host = class_to_shost(dev); arcmsr_attr_host_fw_hd_channels() local 374 (struct AdapterControlBlock *) host->hostdata; arcmsr_attr_host_fw_hd_channels()
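arcmsr_alloc_sysfs_attr() uses the usual create-then-unwind shape for sysfs binary attributes: the read, write and clear files are created under the Scsi_Host device, and a failure removes whatever was created before it. A sketch of that flow, assuming the three bin_attribute objects are defined elsewhere in the driver, as the listing implies.

#include <linux/sysfs.h>
#include <scsi/scsi_host.h>

static int example_alloc_sysfs_attr(struct Scsi_Host *host)
{
	struct kobject *kobj = &host->shost_dev.kobj;
	int error;

	error = sysfs_create_bin_file(kobj, &arcmsr_sysfs_message_read_attr);
	if (error)
		return error;

	error = sysfs_create_bin_file(kobj, &arcmsr_sysfs_message_write_attr);
	if (error)
		goto remove_read;

	error = sysfs_create_bin_file(kobj, &arcmsr_sysfs_message_clear_attr);
	if (error)
		goto remove_write;

	return 0;

remove_write:
	sysfs_remove_bin_file(kobj, &arcmsr_sysfs_message_write_attr);
remove_read:
	sysfs_remove_bin_file(kobj, &arcmsr_sysfs_message_read_attr);
	return error;
}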
|
/linux-4.1.27/arch/um/os-Linux/ |
H A D | util.c | 49 struct utsname host; setup_machinename() local 51 uname(&host); setup_machinename() 54 if (!strcmp(host.machine, "x86_64")) { setup_machinename() 59 if (!strcmp(host.machine, "i686")) { setup_machinename() 65 strcpy(machine_out, host.machine); setup_machinename() 70 struct utsname host; setup_hostinfo() local 72 uname(&host); setup_hostinfo() 73 snprintf(buf, len, "%s %s %s %s %s", host.sysname, host.nodename, setup_hostinfo() 74 host.release, host.version, host.machine); setup_hostinfo()
|
/linux-4.1.27/drivers/ata/ |
H A D | pata_samsung_cf.c | 107 struct s3c_ide_info *info = ap->host->private_data; pata_s3c_set_piomode() 151 static void ata_outb(struct ata_host *host, u8 addr, void __iomem *reg) ata_outb() argument 153 struct s3c_ide_info *info = host->private_data; ata_outb() 162 static u8 ata_inb(struct ata_host *host, void __iomem *reg) ata_inb() argument 164 struct s3c_ide_info *info = host->private_data; ata_inb() 175 * pata_s3c_tf_load - send taskfile registers to host controller 184 ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr); pata_s3c_tf_load() 190 ata_outb(ap->host, tf->hob_feature, ioaddr->feature_addr); pata_s3c_tf_load() 191 ata_outb(ap->host, tf->hob_nsect, ioaddr->nsect_addr); pata_s3c_tf_load() 192 ata_outb(ap->host, tf->hob_lbal, ioaddr->lbal_addr); pata_s3c_tf_load() 193 ata_outb(ap->host, tf->hob_lbam, ioaddr->lbam_addr); pata_s3c_tf_load() 194 ata_outb(ap->host, tf->hob_lbah, ioaddr->lbah_addr); pata_s3c_tf_load() 198 ata_outb(ap->host, tf->feature, ioaddr->feature_addr); pata_s3c_tf_load() 199 ata_outb(ap->host, tf->nsect, ioaddr->nsect_addr); pata_s3c_tf_load() 200 ata_outb(ap->host, tf->lbal, ioaddr->lbal_addr); pata_s3c_tf_load() 201 ata_outb(ap->host, tf->lbam, ioaddr->lbam_addr); pata_s3c_tf_load() 202 ata_outb(ap->host, tf->lbah, ioaddr->lbah_addr); pata_s3c_tf_load() 206 ata_outb(ap->host, tf->device, ioaddr->device_addr); pata_s3c_tf_load() 218 tf->feature = ata_inb(ap->host, ioaddr->error_addr); pata_s3c_tf_read() 219 tf->nsect = ata_inb(ap->host, ioaddr->nsect_addr); pata_s3c_tf_read() 220 tf->lbal = ata_inb(ap->host, ioaddr->lbal_addr); pata_s3c_tf_read() 221 tf->lbam = ata_inb(ap->host, ioaddr->lbam_addr); pata_s3c_tf_read() 222 tf->lbah = ata_inb(ap->host, ioaddr->lbah_addr); pata_s3c_tf_read() 223 tf->device = ata_inb(ap->host, ioaddr->device_addr); pata_s3c_tf_read() 226 ata_outb(ap->host, tf->ctl | ATA_HOB, ioaddr->ctl_addr); pata_s3c_tf_read() 227 tf->hob_feature = ata_inb(ap->host, ioaddr->error_addr); pata_s3c_tf_read() 228 tf->hob_nsect = ata_inb(ap->host, ioaddr->nsect_addr); pata_s3c_tf_read() 229 tf->hob_lbal = ata_inb(ap->host, ioaddr->lbal_addr); pata_s3c_tf_read() 230 tf->hob_lbam = ata_inb(ap->host, ioaddr->lbam_addr); pata_s3c_tf_read() 231 tf->hob_lbah = ata_inb(ap->host, ioaddr->lbah_addr); pata_s3c_tf_read() 232 ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr); pata_s3c_tf_read() 238 * pata_s3c_exec_command - issue ATA command to host controller 243 ata_outb(ap->host, tf->command, ap->ioaddr.command_addr); pata_s3c_exec_command() 252 return ata_inb(ap->host, ap->ioaddr.status_addr); pata_s3c_check_status() 260 return ata_inb(ap->host, ap->ioaddr.altstatus_addr); pata_s3c_check_altstatus() 270 struct s3c_ide_info *info = ap->host->private_data; pata_s3c_data_xfer() 306 ata_outb(ap->host, tmp, ap->ioaddr.device_addr); pata_s3c_dev_select() 321 ata_outb(ap->host, 0x55, ioaddr->nsect_addr); pata_s3c_devchk() 322 ata_outb(ap->host, 0xaa, ioaddr->lbal_addr); pata_s3c_devchk() 324 ata_outb(ap->host, 0xaa, ioaddr->nsect_addr); pata_s3c_devchk() 325 ata_outb(ap->host, 0x55, ioaddr->lbal_addr); pata_s3c_devchk() 327 ata_outb(ap->host, 0x55, ioaddr->nsect_addr); pata_s3c_devchk() 328 ata_outb(ap->host, 0xaa, ioaddr->lbal_addr); pata_s3c_devchk() 330 nsect = ata_inb(ap->host, ioaddr->nsect_addr); pata_s3c_devchk() 331 lbal = ata_inb(ap->host, ioaddr->lbal_addr); pata_s3c_devchk() 369 ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr); pata_s3c_bus_softreset() 371 ata_outb(ap->host, ap->ctl | ATA_SRST, ioaddr->ctl_addr); pata_s3c_bus_softreset() 373 ata_outb(ap->host, 
ap->ctl, ioaddr->ctl_addr); pata_s3c_bus_softreset() 380 * pata_s3c_softreset - reset host port via ATA SRST 417 ata_outb(ap->host, ctl, ap->ioaddr.ctl_addr); pata_s3c_set_devctl() 452 struct ata_host *host = dev_instance; pata_s3c_irq() local 453 struct s3c_ide_info *info = host->private_data; pata_s3c_irq() 501 struct ata_host *host; pata_s3c_probe() local 531 /* init ata host */ pata_s3c_probe() 532 host = ata_host_alloc(dev, 1); pata_s3c_probe() 533 if (!host) { pata_s3c_probe() 534 dev_err(dev, "failed to allocate ide host\n"); pata_s3c_probe() 539 ap = host->ports[0]; pata_s3c_probe() 577 host->private_data = info; pata_s3c_probe() 585 platform_set_drvdata(pdev, host); pata_s3c_probe() 587 ret = ata_host_activate(host, info->irq, pata_s3c_probe() 602 struct ata_host *host = platform_get_drvdata(pdev); pata_s3c_remove() local 603 struct s3c_ide_info *info = host->private_data; pata_s3c_remove() 605 ata_host_detach(host); pata_s3c_remove() 616 struct ata_host *host = platform_get_drvdata(pdev); pata_s3c_suspend() local 618 return ata_host_suspend(host, PMSG_SUSPEND); pata_s3c_suspend() 624 struct ata_host *host = platform_get_drvdata(pdev); pata_s3c_resume() local 626 struct s3c_ide_info *info = host->private_data; pata_s3c_resume() 629 ata_host_resume(host); pata_s3c_resume()
|
H A D | pata_imx.c | 47 struct pata_imx_priv *priv = ap->host->private_data; pata_imx_set_mode() 95 struct ata_host *host; pata_imx_probe() local 121 host = ata_host_alloc(&pdev->dev, 1); pata_imx_probe() 122 if (!host) { pata_imx_probe() 127 host->private_data = priv; pata_imx_probe() 128 ap = host->ports[0]; pata_imx_probe() 161 ret = ata_host_activate(host, irq, ata_sff_interrupt, 0, pata_imx_probe() 176 struct ata_host *host = platform_get_drvdata(pdev); pata_imx_remove() local 177 struct pata_imx_priv *priv = host->private_data; pata_imx_remove() 179 ata_host_detach(host); pata_imx_remove() 191 struct ata_host *host = dev_get_drvdata(dev); pata_imx_suspend() local 192 struct pata_imx_priv *priv = host->private_data; pata_imx_suspend() 195 ret = ata_host_suspend(host, PMSG_SUSPEND); pata_imx_suspend() 208 struct ata_host *host = dev_get_drvdata(dev); pata_imx_resume() local 209 struct pata_imx_priv *priv = host->private_data; pata_imx_resume() 220 ata_host_resume(host); pata_imx_resume()
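The PATA entries here and around this one share a single probe skeleton: allocate a one-port ata_host, hang the driver state off host->private_data, then hand the IRQ and SCSI host template to ata_host_activate(). A stripped-down sketch of that skeleton; example_priv and example_sht are placeholders and the per-driver ioaddr/timing setup is elided.

#include <linux/libata.h>
#include <linux/platform_device.h>

static int example_pata_probe(struct platform_device *pdev, void *example_priv,
			      struct scsi_host_template *example_sht, int irq)
{
	struct ata_host *host;

	/* One port is enough for these controllers. */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		return -ENOMEM;

	host->private_data = example_priv;
	platform_set_drvdata(pdev, host);

	/* ...fill host->ports[0]->ioaddr and program timings here... */

	return ata_host_activate(host, irq, ata_sff_interrupt, 0, example_sht);
}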
|
H A D | pata_palmld.c | 53 struct ata_host *host; palmld_pata_probe() local 58 /* allocate host */ palmld_pata_probe() 59 host = ata_host_alloc(&pdev->dev, 1); palmld_pata_probe() 60 if (!host) { palmld_pata_probe() 85 ap = host->ports[0]; palmld_pata_probe() 98 /* activate host */ palmld_pata_probe() 99 ret = ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING, palmld_pata_probe()
|
H A D | pata_isapnp.c | 49 struct ata_host *host; isapnp_init_one() local 63 /* allocate host */ isapnp_init_one() 64 host = ata_host_alloc(&idev->dev, 1); isapnp_init_one() 65 if (!host) isapnp_init_one() 68 /* acquire resources and fill host */ isapnp_init_one() 73 ap = host->ports[0]; isapnp_init_one() 96 return ata_host_activate(host, irq, handler, 0, isapnp_init_one() 111 struct ata_host *host = dev_get_drvdata(dev); isapnp_remove_one() local 113 ata_host_detach(host); isapnp_remove_one()
|
H A D | pata_cs5520.c | 71 struct pci_dev *pdev = to_pci_dev(ap->host->dev); cs5520_set_timings() 129 struct ata_host *host; cs5520_init_one() local 157 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); cs5520_init_one() 158 if (!host) cs5520_init_one() 176 /* Map IO ports and initialize host accordingly */ cs5520_init_one() 186 ioaddr = &host->ports[0]->ioaddr; cs5520_init_one() 193 ata_port_desc(host->ports[0], cs5520_init_one() 195 ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma"); cs5520_init_one() 197 ioaddr = &host->ports[1]->ioaddr; cs5520_init_one() 204 ata_port_desc(host->ports[1], cs5520_init_one() 206 ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma"); cs5520_init_one() 208 /* activate the host */ cs5520_init_one() 210 rc = ata_host_start(host); cs5520_init_one() 216 struct ata_port *ap = host->ports[i]; cs5520_init_one() 222 ata_bmdma_interrupt, 0, DRV_NAME, host); cs5520_init_one() 229 return ata_host_register(host, &cs5520_sht); cs5520_init_one() 243 struct ata_host *host = pci_get_drvdata(pdev); cs5520_reinit_one() local 255 ata_host_resume(host); cs5520_reinit_one() 271 struct ata_host *host = pci_get_drvdata(pdev); cs5520_pci_device_suspend() local 274 rc = ata_host_suspend(host, mesg); cs5520_pci_device_suspend()
|
H A D | sata_qstor.c | 55 QS_HCF_CNFG3 = 0x0003, /* host configuration offset */ 56 QS_HID_HPHY = 0x0004, /* host physical interface info */ 58 QS_HST_SFF = 0x0100, /* host status fifo offset */ 69 QS_CFC_HUFT = 0x0800, /* host upstream fifo threshold */ 70 QS_CFC_HDFT = 0x0804, /* host downstream fifo threshold */ 118 static void qs_host_stop(struct ata_host *host); 177 static void __iomem *qs_mmio_base(struct ata_host *host) qs_mmio_base() argument 179 return host->iomap[QS_MMIO_BAR]; qs_mmio_base() 189 u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); qs_enter_reg_mode() 199 u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); qs_reset_channel_logic() 208 u8 __iomem *mmio_base = qs_mmio_base(ap->host); qs_freeze() 210 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ qs_freeze() 216 u8 __iomem *mmio_base = qs_mmio_base(ap->host); qs_thaw() 219 writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */ qs_thaw() 300 /* host control block (HCB) */ qs_qc_prep() 319 u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); qs_packet_start() 371 static inline unsigned int qs_intr_pkt(struct ata_host *host) qs_intr_pkt() argument 375 u8 __iomem *mmio_base = qs_mmio_base(host); qs_intr_pkt() 385 u8 sHST = sff1 & 0x3f; /* host status */ qs_intr_pkt() 387 struct ata_port *ap = host->ports[port_no]; qs_intr_pkt() 413 static inline unsigned int qs_intr_mmio(struct ata_host *host) qs_intr_mmio() argument 417 for (port_no = 0; port_no < host->n_ports; ++port_no) { qs_intr_mmio() 418 struct ata_port *ap = host->ports[port_no]; qs_intr_mmio() 448 struct ata_host *host = dev_instance; qs_intr() local 454 spin_lock_irqsave(&host->lock, flags); qs_intr() 455 handled = qs_intr_pkt(host) | qs_intr_mmio(host); qs_intr() 456 spin_unlock_irqrestore(&host->lock, flags); qs_intr() 483 struct device *dev = ap->host->dev; qs_port_start() 485 void __iomem *mmio_base = qs_mmio_base(ap->host); qs_port_start() 506 static void qs_host_stop(struct ata_host *host) qs_host_stop() argument 508 void __iomem *mmio_base = qs_mmio_base(host); qs_host_stop() 510 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ qs_host_stop() 514 static void qs_host_init(struct ata_host *host, unsigned int chip_id) qs_host_init() argument 516 void __iomem *mmio_base = host->iomap[QS_MMIO_BAR]; qs_host_init() 519 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ qs_host_init() 523 for (port_no = 0; port_no < host->n_ports; ++port_no) { qs_host_init() 531 for (port_no = 0; port_no < host->n_ports; ++port_no) { qs_host_init() 541 writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */ qs_host_init() 591 struct ata_host *host; qs_ata_init_one() local 596 /* alloc host */ qs_ata_init_one() 597 host = ata_host_alloc_pinfo(&pdev->dev, ppi, QS_PORTS); qs_ata_init_one() 598 if (!host) qs_ata_init_one() 601 /* acquire resources and fill host */ qs_ata_init_one() 612 host->iomap = pcim_iomap_table(pdev); qs_ata_init_one() 614 rc = qs_set_dma_masks(pdev, host->iomap[QS_MMIO_BAR]); qs_ata_init_one() 618 for (port_no = 0; port_no < host->n_ports; ++port_no) { qs_ata_init_one() 619 struct ata_port *ap = host->ports[port_no]; qs_ata_init_one() 621 void __iomem *chan = host->iomap[QS_MMIO_BAR] + offset; qs_ata_init_one() 630 qs_host_init(host, board_idx); qs_ata_init_one() 633 return ata_host_activate(host, pdev->irq, qs_intr, IRQF_SHARED, qs_ata_init_one()
|
H A D | sata_sx4.c | 224 static unsigned int pdc20621_dimm_init(struct ata_host *host); 225 static int pdc20621_detect_dimm(struct ata_host *host); 226 static unsigned int pdc20621_i2c_read(struct ata_host *host, 228 static int pdc20621_prog_dimm0(struct ata_host *host); 229 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host); 231 static void pdc20621_get_from_dimm(struct ata_host *host, 234 static void pdc20621_put_to_dimm(struct ata_host *host, 301 struct device *dev = ap->host->dev; pdc_port_start() 458 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR]; pdc20621_dma_prep() 459 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; pdc20621_dma_prep() 484 * Build ATA, host DMA packets pdc20621_dma_prep() 506 /* force host FIFO dump */ pdc20621_dma_prep() 518 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR]; pdc20621_nodata_prep() 519 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; pdc20621_nodata_prep() 541 /* force host FIFO dump */ pdc20621_nodata_prep() 568 struct ata_host *host = ap->host; __pdc20621_push_hdma() local 569 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; __pdc20621_push_hdma() 586 struct pdc_host_priv *pp = ap->host->private_data; pdc20621_push_hdma() 604 struct pdc_host_priv *pp = ap->host->private_data; pdc20621_pop_hdma() 623 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; pdc20621_dump_hdma() 640 struct ata_host *host = ap->host; pdc20621_packet_start() local 642 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; pdc20621_packet_start() 718 /* step two - DMA from DIMM to host */ pdc20621_host_intr() 743 /* step one - DMA from host to DIMM */ pdc20621_host_intr() 791 struct ata_host *host = dev_instance; pdc20621_interrupt() local 800 if (!host || !host->iomap[PDC_MMIO_BAR]) { pdc20621_interrupt() 805 mmio_base = host->iomap[PDC_MMIO_BAR]; pdc20621_interrupt() 822 spin_lock(&host->lock); pdc20621_interrupt() 828 if (port_no >= host->n_ports) pdc20621_interrupt() 831 ap = host->ports[port_no]; pdc20621_interrupt() 844 spin_unlock(&host->lock); pdc20621_interrupt() 997 static void pdc20621_get_from_dimm(struct ata_host *host, void *psource, pdc20621_get_from_dimm() argument 1004 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; pdc20621_get_from_dimm() 1005 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR]; pdc20621_get_from_dimm() 1049 static void pdc20621_put_to_dimm(struct ata_host *host, void *psource, pdc20621_put_to_dimm() argument 1056 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; pdc20621_put_to_dimm() 1057 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR]; pdc20621_put_to_dimm() 1099 static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device, pdc20621_i2c_read() argument 1102 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; pdc20621_i2c_read() 1135 static int pdc20621_detect_dimm(struct ata_host *host) pdc20621_detect_dimm() argument 1138 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, pdc20621_detect_dimm() 1145 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) { pdc20621_detect_dimm() 1155 static int pdc20621_prog_dimm0(struct ata_host *host) pdc20621_prog_dimm0() argument 1161 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; pdc20621_prog_dimm0() 1184 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, pdc20621_prog_dimm0() 1220 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host) pdc20621_prog_dimm_global() argument 1224 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; pdc20621_prog_dimm_global() 1241 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 
pdc20621_prog_dimm_global() 1268 static unsigned int pdc20621_dimm_init(struct ata_host *host) pdc20621_dimm_init() argument 1277 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; pdc20621_dimm_init() 1336 if (!(speed = pdc20621_detect_dimm(host))) { pdc20621_dimm_init() 1343 size = pdc20621_prog_dimm0(host); pdc20621_dimm_init() 1347 if (pdc20621_prog_dimm_global(host)) { pdc20621_dimm_init() 1362 pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40); pdc20621_dimm_init() 1363 pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40); pdc20621_dimm_init() 1365 pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40); pdc20621_dimm_init() 1366 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40); pdc20621_dimm_init() 1369 pdc20621_get_from_dimm(host, test_parttern2, 0x10040, pdc20621_dimm_init() 1374 pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40); pdc20621_dimm_init() 1375 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40); pdc20621_dimm_init() 1383 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, pdc20621_dimm_init() 1392 pdc20621_put_to_dimm(host, buf, addr, pdc20621_dimm_init() 1403 static void pdc_20621_init(struct ata_host *host) pdc_20621_init() argument 1406 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; pdc_20621_init() 1439 struct ata_host *host; pdc_sata_init_one() local 1445 /* allocate host */ pdc_sata_init_one() 1446 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4); pdc_sata_init_one() 1448 if (!host || !hpriv) pdc_sata_init_one() 1451 host->private_data = hpriv; pdc_sata_init_one() 1453 /* acquire resources and fill host */ pdc_sata_init_one() 1464 host->iomap = pcim_iomap_table(pdev); pdc_sata_init_one() 1467 struct ata_port *ap = host->ports[i]; pdc_sata_init_one() 1468 void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS; pdc_sata_init_one() 1486 if (pdc20621_dimm_init(host)) pdc_sata_init_one() 1488 pdc_20621_init(host); pdc_sata_init_one() 1491 return ata_host_activate(host, pdev->irq, pdc20621_interrupt, pdc_sata_init_one()
|
H A D | pata_sil680.c | 86 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sil680_cable_detect() 118 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sil680_set_piomode() 166 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sil680_set_dmamode() 200 * sil680_sff_exec_command - issue ATA command to host controller 209 * spin_lock_irqsave(host lock) 221 struct pci_dev *pdev = to_pci_dev(ap->host->dev); sil680_sff_irq_check() 342 struct ata_host *host; sil680_init_one() local 370 /* Allocate host and set it up */ sil680_init_one() 371 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); sil680_init_one() 372 if (!host) sil680_init_one() 374 host->iomap = pcim_iomap_table(pdev); sil680_init_one() 386 mmio_base = host->iomap[SIL680_MMIO_BAR]; sil680_init_one() 387 host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00; sil680_init_one() 388 host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80; sil680_init_one() 389 host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a; sil680_init_one() 390 host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a; sil680_init_one() 391 ata_sff_std_ports(&host->ports[0]->ioaddr); sil680_init_one() 392 host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08; sil680_init_one() 393 host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0; sil680_init_one() 394 host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca; sil680_init_one() 395 host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca; sil680_init_one() 396 ata_sff_std_ports(&host->ports[1]->ioaddr); sil680_init_one() 399 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, sil680_init_one() 409 struct ata_host *host = pci_get_drvdata(pdev); sil680_reinit_one() local 416 ata_host_resume(host); sil680_reinit_one()
|
H A D | libahci_platform.c | 30 static void ahci_host_stop(struct ata_host *host); 40 * @hpriv: host private area to store config values 77 * @hpriv: host private area to store config values 93 * @hpriv: host private area to store config values 122 * @hpriv: host private area to store config values 139 * @hpriv: host private area to store config values 175 * @hpriv: host private area to store config values 192 * @hpriv: host private area to store config values 236 * @hpriv: host private area to store config values 491 * ahci_platform_init_host - Bring up an ahci-platform host 492 * @pdev: platform device pointer for the host 493 * @hpriv: ahci-host private data for the host 498 * ahci-platform host, note any necessary resources (ie clks, phys, etc.) 512 struct ata_host *host; ahci_platform_init_host() local 521 /* prepare host */ ahci_platform_init_host() 541 host = ata_host_alloc_pinfo(dev, ppi, n_ports); ahci_platform_init_host() 542 if (!host) ahci_platform_init_host() 545 host->private_data = hpriv; ahci_platform_init_host() 548 host->flags |= ATA_HOST_PARALLEL_SCAN; ahci_platform_init_host() 553 ahci_reset_em(host); ahci_platform_init_host() 555 for (i = 0; i < host->n_ports; i++) { ahci_platform_init_host() 556 struct ata_port *ap = host->ports[i]; ahci_platform_init_host() 584 rc = ahci_reset_controller(host); ahci_platform_init_host() 588 ahci_init_controller(host); ahci_platform_init_host() 589 ahci_print_info(host, "platform"); ahci_platform_init_host() 591 return ahci_host_activate(host, irq, sht); ahci_platform_init_host() 595 static void ahci_host_stop(struct ata_host *host) ahci_host_stop() argument 597 struct ahci_host_priv *hpriv = host->private_data; ahci_host_stop() 604 * ahci_platform_suspend_host - Suspend an ahci-platform host 605 * @dev: device pointer for the host 608 * ahci-platform host, note any necessary resources (ie clks, phys, etc.) 616 struct ata_host *host = dev_get_drvdata(dev); ahci_platform_suspend_host() local 617 struct ahci_host_priv *hpriv = host->private_data; ahci_platform_suspend_host() 636 return ata_host_suspend(host, PMSG_SUSPEND); ahci_platform_suspend_host() 641 * ahci_platform_resume_host - Resume an ahci-platform host 642 * @dev: device pointer for the host 645 * host, note any necessary resources (ie clks, phys, etc.) must be 653 struct ata_host *host = dev_get_drvdata(dev); ahci_platform_resume_host() local 657 rc = ahci_reset_controller(host); ahci_platform_resume_host() 661 ahci_init_controller(host); ahci_platform_resume_host() 664 ata_host_resume(host); ahci_platform_resume_host() 674 * This function suspends the host associated with the device, followed by 682 struct ata_host *host = dev_get_drvdata(dev); ahci_platform_suspend() local 683 struct ahci_host_priv *hpriv = host->private_data; ahci_platform_suspend() 701 * resuming the host associated with the device. 708 struct ata_host *host = dev_get_drvdata(dev); ahci_platform_resume() local 709 struct ahci_host_priv *hpriv = host->private_data; ahci_platform_resume()
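The libahci_platform.c comments describe ahci_platform_init_host() as the step that allocates the ata_host and activates the controller once clocks, phys and regulators are up. Below is a hedged probe sketch built on those helpers; it assumes the 4.1-era signatures shown here plus the usual driver-local ahci.h definitions (ahci_platform_ops, AHCI_SHT), so treat it as illustrative rather than a drop-in driver.

#include <linux/ahci_platform.h>
#include <linux/platform_device.h>
#include "ahci.h"	/* driver-local: ahci_platform_ops, AHCI_SHT() */

static const struct ata_port_info example_port_info = {
	.flags		= AHCI_FLAG_COMMON,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ahci_platform_ops,
};

static struct scsi_host_template example_sht = {
	AHCI_SHT("example_ahci"),
};

static int example_ahci_probe(struct platform_device *pdev)
{
	struct ahci_host_priv *hpriv;
	int rc;

	hpriv = ahci_platform_get_resources(pdev);
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	rc = ahci_platform_enable_resources(hpriv);
	if (rc)
		return rc;

	rc = ahci_platform_init_host(pdev, hpriv, &example_port_info,
				     &example_sht);
	if (rc)
		ahci_platform_disable_resources(hpriv);

	return rc;
}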
|
H A D | pata_arasan_cf.c | 4 * Arasan Compact Flash host controller source file 187 struct ata_host *host; member in struct:arasan_cf_dev 228 struct device *dev = acdev->host->dev; cf_dumpregs() 284 struct ata_port *ap = acdev->host->ports[0]; cf_card_detect() 308 struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev); cf_init() 315 dev_dbg(acdev->host->dev, "clock enable failed"); cf_init() 321 dev_warn(acdev->host->dev, "clock set rate failed"); cf_init() 326 spin_lock_irqsave(&acdev->host->lock, flags); cf_init() 338 spin_unlock_irqrestore(&acdev->host->lock, flags); cf_init() 347 spin_lock_irqsave(&acdev->host->lock, flags); cf_exit() 353 spin_unlock_irqrestore(&acdev->host->lock, flags); cf_exit() 370 ata_sff_interrupt(acdev->irq, acdev->host); dma_complete() 372 spin_lock_irqsave(&acdev->host->lock, flags); dma_complete() 375 spin_unlock_irqrestore(&acdev->host->lock, flags); dma_complete() 383 dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read"); wait4buf() 405 dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n"); dma_xfer() 415 dev_err(acdev->host->dev, "dma_submit_error\n"); dma_xfer() 424 dev_err(acdev->host->dev, "wait_for_completion_timeout\n"); dma_xfer() 457 spin_lock_irqsave(&acdev->host->lock, flags); sg_xfer() 462 spin_unlock_irqrestore(&acdev->host->lock, flags); sg_xfer() 477 dev_err(acdev->host->dev, "dma failed"); sg_xfer() 499 spin_lock_irqsave(&acdev->host->lock, flags); sg_xfer() 502 spin_unlock_irqrestore(&acdev->host->lock, flags); sg_xfer() 530 acdev->dma_chan = dma_request_slave_channel(acdev->host->dev, "data"); data_xfer() 532 dev_err(acdev->host->dev, "Unable to get dma_chan\n"); data_xfer() 548 spin_lock_irqsave(&acdev->host->lock, flags); data_xfer() 550 spin_unlock_irqrestore(&acdev->host->lock, flags); data_xfer() 562 spin_lock_irqsave(&acdev->host->lock, flags); data_xfer() 581 spin_lock_irqsave(&acdev->host->lock, flags); delayed_finish() 583 spin_unlock_irqrestore(&acdev->host->lock, flags); delayed_finish() 601 spin_lock_irqsave(&acdev->host->lock, flags); arasan_cf_interrupt() 611 spin_unlock_irqrestore(&acdev->host->lock, flags); arasan_cf_interrupt() 619 spin_unlock_irqrestore(&acdev->host->lock, flags); arasan_cf_interrupt() 621 dev_err(acdev->host->dev, "pio xfer err irq\n"); arasan_cf_interrupt() 625 spin_unlock_irqrestore(&acdev->host->lock, flags); arasan_cf_interrupt() 645 struct arasan_cf_dev *acdev = ap->host->private_data; arasan_cf_freeze() 659 struct arasan_cf_dev *acdev = ap->host->private_data; arasan_cf_error_handler() 689 struct arasan_cf_dev *acdev = ap->host->private_data; arasan_cf_qc_issue() 722 struct arasan_cf_dev *acdev = ap->host->private_data; arasan_cf_set_piomode() 733 spin_lock_irqsave(&acdev->host->lock, flags); arasan_cf_set_piomode() 743 spin_unlock_irqrestore(&acdev->host->lock, flags); arasan_cf_set_piomode() 748 struct arasan_cf_dev *acdev = ap->host->private_data; arasan_cf_set_dmamode() 752 spin_lock_irqsave(&acdev->host->lock, flags); arasan_cf_set_dmamode() 768 spin_unlock_irqrestore(&acdev->host->lock, flags); arasan_cf_set_dmamode() 778 spin_unlock_irqrestore(&acdev->host->lock, flags); arasan_cf_set_dmamode() 794 struct ata_host *host; arasan_cf_probe() local 843 /* allocate host */ arasan_cf_probe() 844 host = ata_host_alloc(&pdev->dev, 1); arasan_cf_probe() 845 if (!host) { arasan_cf_probe() 847 dev_warn(&pdev->dev, "alloc host fail\n"); arasan_cf_probe() 851 ap = host->ports[0]; arasan_cf_probe() 852 host->private_data = acdev; arasan_cf_probe() 853 acdev->host = host; 
arasan_cf_probe() 901 ret = ata_host_activate(host, acdev->irq, irq_handler, 0, arasan_cf_probe() 914 struct ata_host *host = platform_get_drvdata(pdev); arasan_cf_remove() local 915 struct arasan_cf_dev *acdev = host->ports[0]->private_data; arasan_cf_remove() 917 ata_host_detach(host); arasan_cf_remove() 927 struct ata_host *host = dev_get_drvdata(dev); arasan_cf_suspend() local 928 struct arasan_cf_dev *acdev = host->ports[0]->private_data; arasan_cf_suspend() 934 return ata_host_suspend(host, PMSG_SUSPEND); arasan_cf_suspend() 939 struct ata_host *host = dev_get_drvdata(dev); arasan_cf_resume() local 940 struct arasan_cf_dev *acdev = host->ports[0]->private_data; arasan_cf_resume() 943 ata_host_resume(host); arasan_cf_resume()
|
H A D | sata_uli.c | 105 struct uli_priv *hpriv = ap->host->private_data; get_scr_cfg_addr() 111 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); uli_scr_cfg_read() 121 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); uli_scr_cfg_write() 149 struct ata_host *host; uli_init_one() local 165 /* allocate the host */ uli_init_one() 166 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); uli_init_one() 167 if (!host) uli_init_one() 173 host->private_data = hpriv; uli_init_one() 176 rc = ata_pci_sff_init_host(host); uli_init_one() 180 ata_pci_bmdma_init(host); uli_init_one() 182 iomap = host->iomap; uli_init_one() 192 ioaddr = &host->ports[2]->ioaddr; uli_init_one() 201 ata_port_desc(host->ports[2], uli_init_one() 207 ioaddr = &host->ports[3]->ioaddr; uli_init_one() 216 ata_port_desc(host->ports[2], uli_init_one() 241 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, uli_init_one()
|
/linux-4.1.27/drivers/net/wireless/rsi/ |
H A D | rsi_91x_sdio.c | 65 return mmc_wait_for_cmd(card->host, &io_cmd, 0); rsi_cmd52writebyte() 90 err = mmc_wait_for_cmd(card->host, &io_cmd, 0); rsi_cmd52readbyte() 113 struct mmc_host *host; rsi_issue_sdiocommand() local 116 host = func->card->host; rsi_issue_sdiocommand() 122 err = mmc_wait_for_cmd(host, &cmd, 3); rsi_issue_sdiocommand() 157 struct mmc_host *host = card->host; rsi_reset_card() local 158 s32 bit = (fls(host->ocr_avail) - 1); rsi_reset_card() 169 * Hence expect a timeout status from host controller rsi_reset_card() 178 host->ios.vdd = bit; rsi_reset_card() 179 host->ios.chip_select = MMC_CS_DONTCARE; rsi_reset_card() 180 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; rsi_reset_card() 181 host->ios.power_mode = MMC_POWER_UP; rsi_reset_card() 182 host->ios.bus_width = MMC_BUS_WIDTH_1; rsi_reset_card() 183 host->ios.timing = MMC_TIMING_LEGACY; rsi_reset_card() 184 host->ops->set_ios(host, &host->ios); rsi_reset_card() 192 host->ios.clock = host->f_min; rsi_reset_card() 193 host->ios.power_mode = MMC_POWER_ON; rsi_reset_card() 194 host->ops->set_ios(host, &host->ios); rsi_reset_card() 203 host->ios.chip_select = MMC_CS_HIGH; rsi_reset_card() 204 host->ops->set_ios(host, &host->ios); rsi_reset_card() 211 host->ios.chip_select = MMC_CS_DONTCARE; rsi_reset_card() 212 host->ops->set_ios(host, &host->ios); rsi_reset_card() 214 host->use_spi_crc = 0; rsi_reset_card() 219 if (!host->ocr_avail) { rsi_reset_card() 229 host->ocr_avail = resp; rsi_reset_card() 236 host->ocr_avail, rsi_reset_card() 267 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; rsi_reset_card() 268 host->ops->set_ios(host, &host->ios); rsi_reset_card() 282 if (card->host->caps & MMC_CAP_SD_HIGHSPEED) { rsi_reset_card() 298 host->ios.timing = MMC_TIMING_SD_HS; rsi_reset_card() 299 host->ops->set_ios(host, &host->ios); rsi_reset_card() 309 if (clock > host->f_max) rsi_reset_card() 310 clock = host->f_max; rsi_reset_card() 312 host->ios.clock = clock; rsi_reset_card() 313 host->ops->set_ios(host, &host->ios); rsi_reset_card() 315 if (card->host->caps & MMC_CAP_4_BIT_DATA) { rsi_reset_card() 326 host->ios.bus_width = MMC_BUS_WIDTH_4; rsi_reset_card() 327 host->ops->set_ios(host, &host->ios); rsi_reset_card() 342 struct mmc_host *host = dev->pfunction->card->host; rsi_setclock() local 346 if (clock > host->f_max) rsi_setclock() 347 clock = host->f_max; rsi_setclock() 348 host->ios.clock = clock; rsi_setclock() 349 host->ops->set_ios(host, &host->ios); rsi_setclock() 353 * rsi_setblocklength() - This function sets the host block length.
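rsi_cmd52writebyte() above fills in an mmc_command and hands it to mmc_wait_for_cmd() on the card's host. The sketch below shows one plausible packing of that CMD52 (SD_IO_RW_DIRECT) argument word, following the SDIO spec layout (write flag, function number, register address, data byte); it is illustrative, not a verified copy of the driver's packing.

#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/sdio.h>

static int example_cmd52_write(struct mmc_card *card, unsigned int fn,
			       unsigned int addr, u8 byte)
{
	struct mmc_command cmd = {};

	cmd.opcode = SD_IO_RW_DIRECT;
	cmd.arg  = 0x80000000;		/* write */
	cmd.arg |= fn << 28;		/* function number */
	cmd.arg |= addr << 9;		/* register address */
	cmd.arg |= byte;		/* data byte to write */
	cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, 0);
}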
|
/linux-4.1.27/drivers/net/appletalk/ |
H A D | ltpc.h | 26 unsigned short addr; /* host order */ 27 unsigned short length; /* host order */ 52 unsigned short length; /* host order */ 60 unsigned short length; /* host order */
|
/linux-4.1.27/arch/x86/um/os-Linux/ |
H A D | tls.c | 18 /* Checks whether host supports TLS, and sets *tls_min according to the value 19 * valid on the host. 20 * i386 host have it == 6; x86_64 host have it == 12, for i386 emulation. */ check_host_supports_tls()
|
/linux-4.1.27/arch/um/include/shared/ |
H A D | elf_user.h | 10 /* For compilation on a host that doesn't support AT_SYSINFO (Linux 2.4) */
|
/linux-4.1.27/include/trace/events/ |
H A D | filemap.h | 29 __entry->i_ino = page->mapping->host->i_ino; 31 if (page->mapping->host->i_sb) 32 __entry->s_dev = page->mapping->host->i_sb->s_dev; 34 __entry->s_dev = page->mapping->host->i_rdev;
|
/linux-4.1.27/drivers/scsi/bnx2fc/ |
H A D | bnx2fc_debug.c | 30 io_req->port->lport->host) BNX2FC_IO_DBG() 31 shost_printk(KERN_INFO, io_req->port->lport->host, BNX2FC_IO_DBG() 53 if (tgt && tgt->port && tgt->port->lport && tgt->port->lport->host && BNX2FC_TGT_DBG() 55 shost_printk(KERN_INFO, tgt->port->lport->host, BNX2FC_TGT_DBG() 77 if (lport && lport->host) BNX2FC_HBA_DBG() 78 shost_printk(KERN_INFO, lport->host, PFX "%pV", &vaf); BNX2FC_HBA_DBG()
|
/linux-4.1.27/drivers/scsi/libfc/ |
H A D | fc_libfc.h | 48 pr_info("host%u: lport %6.6x: " fmt, \ 49 (lport)->host->host_no, \ 54 pr_info("host%u: disc: " fmt, \ 55 fc_disc_lport(disc)->host->host_no, \ 60 pr_info("host%u: rport %6.6x: " fmt, \ 61 (lport)->host->host_no, \ 73 pr_info("host%u: fcp: %6.6x: " \ 75 (pkt)->lp->host->host_no, \ 79 pr_info("host%u: fcp: %6.6x: " fmt, \ 80 (pkt)->lp->host->host_no, \ 87 pr_info("host%u: xid %4x: " fmt, \ 88 (exch)->lp->host->host_no, \ 93 pr_info("host%u: scsi: " fmt, \ 94 (lport)->host->host_no, ##args))
|
/linux-4.1.27/drivers/scsi/fnic/ |
H A D | fcpio.h | 25 * communication by the host driver to the fcp firmware. 29 * Exchange and sequence id space allocated to the host driver 117 * The header command tag. All host requests will use the "tag" field 119 * a host request, it will copy the tag field into the response. 123 * requests. These two requests do not have corresponding host requests 164 * host driver 199 * fcpio_icmnd_16: host -> firmware request 247 * fcpio_icmnd_32: host -> firmware request 275 * fcpio_itmf: host -> firmware request 307 * fcpio_tdata: host -> firmware request 327 * fcpio_txrdy: host -> firmware request 342 * fcpio_trsp: host -> firmware request 365 * fcpio_ttmf_ack: host -> firmware response 367 * used by the host to indicate to the firmware it has received and processed 377 * fcpio_tabort: host -> firmware request 379 * used by the host to request the firmware to abort a target request that was 387 * fcpio_reset: host -> firmware request 389 * used by the host to signal a reset of the driver to the firmware 402 * fcpio_flogi_reg: host -> firmware request 405 * used by the host to notify the firmware of the lif's s_id 418 * fcpio_echo: host -> firmware request 427 * fcpio_lunmap_req: host -> firmware request 438 * fcpio_flogi_fip_reg: host -> firmware request 441 * used by the host to notify the firmware of the lif's s_id 456 * Basic structure for all fcpio structures that are sent from the host to the 459 #define FCPIO_HOST_REQ_LEN 128 /* expected length of host requests */ 471 * Initiator host requests 478 * Target host requests 498 * fcpio_icmnd_cmpl: firmware -> host response 500 * used for sending the host a response to an initiator command 517 * fcpio_itmf_cmpl: firmware -> host response 519 * used for sending the host a response for a itmf request 526 * fcpio_tcmnd_16: firmware -> host request 528 * used by the firmware to notify the host of an incoming target SCSI 16-Byte 559 * fcpio_tcmnd_32: firmware -> host request 561 * used by the firmware to notify the host of an incoming target SCSI 32-Byte 577 * fcpio_tdrsp_cmpl: firmware -> host response 579 * used by the firmware to notify the host of a response to a host target 588 * fcpio_ttmf: firmware -> host request 590 * used by the firmware to notify the host of an incoming task management 612 * fcpio_tabort_cmpl: firmware -> host response 614 * used by the firmware to respond to a host's tabort request 622 * fcpio_ack: firmware -> host response 624 * used by firmware to notify the host of the last work request received 627 u16 request_out; /* last host entry received */ 632 * fcpio_reset_cmpl: firmware -> host response 634 * use by firmware to respond to the host's reset request 641 * fcpio_flogi_reg_cmpl: firmware -> host response 651 * fcpio_echo_cmpl: firmware -> host response 660 * fcpio_lunmap_chng: firmware -> host notification 663 * notifies the host that the lunmap tables have changed 670 * fcpio_lunmap_req_cmpl: firmware -> host response 673 * response for lunmap table request from the host 681 * the host. They are 64 bytes per structure.
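The fcpio.h comments above pin down the request/response contract between the host driver and the firmware: every host request carries a tag in its header, and the firmware copies that tag into the matching completion so the driver can find the original request again. A generic sketch of that correlation idea (the structure, array size and names below are made up for illustration, not the real fcpio or fnic data structures; locking is omitted):

    #include <linux/types.h>

    #define EXAMPLE_MAX_TAGS 64

    struct example_req {
            bool  in_use;
            void *ctx;      /* e.g. the SCSI command behind this request */
    };

    static struct example_req example_reqs[EXAMPLE_MAX_TAGS];

    /* Pick a free slot; its index becomes the tag sent down with the request. */
    static int example_alloc_tag(void *ctx)
    {
            int tag;

            for (tag = 0; tag < EXAMPLE_MAX_TAGS; tag++) {
                    if (!example_reqs[tag].in_use) {
                            example_reqs[tag].in_use = true;
                            example_reqs[tag].ctx = ctx;
                            return tag;
                    }
            }
            return -1;
    }

    /* The firmware echoes the tag back in its completion; recover the context. */
    static void *example_complete_tag(u32 tag)
    {
            void *ctx;

            if (tag >= EXAMPLE_MAX_TAGS || !example_reqs[tag].in_use)
                    return NULL;
            ctx = example_reqs[tag].ctx;
            example_reqs[tag].in_use = false;
            return ctx;
    }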
|
H A D | fnic_main.c | 188 static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) fnic_get_stats() argument 191 struct fc_lport *lp = shost_priv(host); fnic_get_stats() 206 FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic_get_stats() 231 void fnic_dump_fchost_stats(struct Scsi_Host *host, fnic_dump_fchost_stats() argument 234 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 237 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 240 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 243 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 246 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 249 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 252 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 255 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 258 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 261 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 264 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 267 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 270 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 273 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 276 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 279 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 282 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 285 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 288 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 291 FNIC_MAIN_NOTE(KERN_NOTICE, host, fnic_dump_fchost_stats() 298 * fnic_reset_host_stats : clears host stats 301 static void fnic_reset_host_stats(struct Scsi_Host *host) fnic_reset_host_stats() argument 304 struct fc_lport *lp = shost_priv(host); fnic_reset_host_stats() 310 stats = fnic_get_stats(host); fnic_reset_host_stats() 311 fnic_dump_fchost_stats(host, stats); fnic_reset_host_stats() 318 FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic_reset_host_stats() 337 shost_printk(KERN_ERR, fnic->lport->host, fnic_log_q_error() 345 shost_printk(KERN_ERR, fnic->lport->host, fnic_log_q_error() 353 shost_printk(KERN_ERR, fnic->lport->host, fnic_log_q_error() 389 shost_printk(KERN_ERR, fnic->lport->host, fnic_notify_set() 544 struct Scsi_Host *host; fnic_probe() local 553 * Allocate SCSI Host and set up association between host, fnic_probe() 562 host = lp->host; fnic_probe() 568 host->host_no); fnic_probe() 570 host->transportt = fnic_fc_transport; fnic_probe() 574 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 586 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 593 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 608 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 615 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 623 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 632 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 643 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 652 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 662 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 669 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 676 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 686 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 694 host->can_queue = min_t(u32, FNIC_MAX_IO_REQ, fnic_probe() 698 fnic->fnic_max_tag_id = host->can_queue; fnic_probe() 700 err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id); fnic_probe() 702 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 707 host->max_lun 
= fnic->config.luns_per_tgt; fnic_probe() 708 host->max_id = FNIC_MAX_FCP_TARGET; fnic_probe() 709 host->max_cmd_len = FCOE_MAX_CMD_LEN; fnic_probe() 715 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 723 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 769 shost_printk(KERN_INFO, fnic->lport->host, fnic_probe() 786 shost_printk(KERN_INFO, fnic->lport->host, fnic_probe() 802 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 816 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 825 * Add host to SCSI fnic_probe() 827 err = scsi_add_host(lp->host, &pdev->dev); fnic_probe() 829 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 870 fc_host_maxframe_size(lp->host) = lp->mfs; fnic_probe() 871 fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000; fnic_probe() 873 sprintf(fc_host_symbolic_name(lp->host), fnic_probe() 899 shost_printk(KERN_ERR, fnic->lport->host, fnic_probe() 914 fc_remove_host(lp->host); fnic_probe() 915 scsi_remove_host(lp->host); fnic_probe() 942 scsi_host_put(lp->host); fnic_probe() 1010 fc_remove_host(fnic->lport->host); fnic_remove() 1011 scsi_remove_host(fnic->lport->host); fnic_remove() 1022 scsi_host_put(lp->host); fnic_remove()
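fnic_get_stats() and fnic_reset_host_stats() above recover the libfc lport with shost_priv(host): the pointer works because the driver asked scsi_host_alloc() to reserve that many private bytes directly behind the Scsi_Host. A minimal sketch of the pairing, assuming a hypothetical example_priv context (not fnic's own types):

    #include <scsi/scsi_host.h>

    struct example_priv {
            int io_count;   /* whatever per-host state the driver keeps */
    };

    static struct scsi_host_template example_sht = {
            .name = "example",
    };

    /* Allocation: the sizeof() argument reserves space behind the Scsi_Host. */
    static struct Scsi_Host *example_alloc_host(void)
    {
            return scsi_host_alloc(&example_sht, sizeof(struct example_priv));
    }

    /* Retrieval: shost_priv() hands back a pointer to those same bytes. */
    static struct example_priv *example_get_priv(struct Scsi_Host *host)
    {
            return shost_priv(host);
    }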
|
/linux-4.1.27/drivers/ide/ |
H A D | rapide.c | 35 struct ide_host *host; rapide_probe() local 53 ret = ide_host_add(&rapide_port_info, hws, 1, &host); rapide_probe() 57 ecard_set_drvdata(ec, host); rapide_probe() 68 struct ide_host *host = ecard_get_drvdata(ec); rapide_remove() local 72 ide_host_remove(host); rapide_remove()
|
/linux-4.1.27/drivers/misc/vmw_vmci/ |
H A D | vmci_route.c | 71 /* And we cannot send if the source is the host context. */ vmci_route() 92 /* Anywhere to local client on host. */ vmci_route() 96 * guest, then we need to send it down to the host. vmci_route() 97 * Note that if we are also acting as a host then this vmci_route() 100 * way to remove any ambiguity from the host context. vmci_route() 105 * host local communication. The hypervisor vmci_route() 106 * may send vmci event datagrams to the host vmci_route() 108 * an "outer host" through the guest device. vmci_route() 124 /* Send it from local client down to the host. */ vmci_route() 131 * it is destined for a local client on this host, or vmci_route() 132 * it is from another local client on this host. We vmci_route() 133 * must be acting as a host to service it. vmci_route() 142 * host context. vmci_route() 156 * If we are acting as a host then this might be destined for vmci_route() 166 * Otherwise we can use the host vmci_route() 179 * communication destined for the host vmci_route() 192 * The host is attempting to reach a CID vmci_route() 204 * we need to send it down to the host. We do not filter out VM to vmci_route() 210 * Ending up here means we have neither guest nor host vmci_route() 221 * Send it from local client down to the host, which will vmci_route()
|
/linux-4.1.27/fs/ubifs/ |
H A D | xattr.c | 35 * attribute, the host inode number, and the extended attribute inode number. 89 * @host: host inode 95 * and value @value for inode @host. The host inode is also updated on flash 100 static int create_xattr(struct ubifs_info *c, struct inode *host, create_xattr() argument 105 struct ubifs_inode *ui, *host_ui = ubifs_inode(host); create_xattr() 112 host->i_ino, host_ui->xattr_cnt); create_xattr() 124 host->i_ino, names_len, XATTR_LIST_MAX); create_xattr() 132 inode = ubifs_new_inode(c, host, S_IFREG | S_IRWXUGO); create_xattr() 156 host->i_ctime = ubifs_current_time(host); create_xattr() 162 err = ubifs_jnl_update(c, host, nm, inode, 0, 1); create_xattr() 188 * @host: host inode 197 static int change_xattr(struct ubifs_info *c, struct inode *host, change_xattr() argument 201 struct ubifs_inode *host_ui = ubifs_inode(host); change_xattr() 221 host->i_ctime = ubifs_current_time(host); change_xattr() 226 * It is important to write the host inode after the xattr inode change_xattr() 227 * because if the host inode gets synchronized (via 'fsync()'), then change_xattr() 229 * before the host inode in the write-buffer. change_xattr() 231 err = ubifs_jnl_change_xattr(c, inode, host); change_xattr() 302 static int setxattr(struct inode *host, const char *name, const void *value, setxattr() argument 306 struct ubifs_info *c = host->i_sb->s_fs_info; setxattr() 312 ubifs_assert(mutex_is_locked(&host->i_mutex)); setxattr() 329 xent_key_init(c, &key, host->i_ino, &nm); setxattr() 339 err = create_xattr(c, host, &nm, value, size); setxattr() 355 err = change_xattr(c, host, inode, value, size); setxattr() 366 dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd", ubifs_setxattr() 375 struct inode *inode, *host = d_inode(dentry); ubifs_getxattr() local 376 struct ubifs_info *c = host->i_sb->s_fs_info; ubifs_getxattr() 384 host->i_ino, dentry, size); ubifs_getxattr() 394 xent_key_init(c, &key, host->i_ino, &nm); ubifs_getxattr() 410 ubifs_assert(ubifs_inode(host)->xattr_size > ui->data_len); ubifs_getxattr() 435 struct inode *host = d_inode(dentry); ubifs_listxattr() local 436 struct ubifs_info *c = host->i_sb->s_fs_info; ubifs_listxattr() 437 struct ubifs_inode *host_ui = ubifs_inode(host); ubifs_listxattr() 442 dbg_gen("ino %lu ('%pd'), buffer size %zd", host->i_ino, ubifs_listxattr() 456 lowest_xent_key(c, &key, host->i_ino); ubifs_listxattr() 496 static int remove_xattr(struct ubifs_info *c, struct inode *host, remove_xattr() argument 500 struct ubifs_inode *host_ui = ubifs_inode(host); remove_xattr() 512 host->i_ctime = ubifs_current_time(host); remove_xattr() 518 err = ubifs_jnl_delete_xattr(c, host, inode, nm); remove_xattr() 538 struct inode *inode, *host = d_inode(dentry); ubifs_removexattr() local 539 struct ubifs_info *c = host->i_sb->s_fs_info; ubifs_removexattr() 546 host->i_ino, dentry); ubifs_removexattr() 547 ubifs_assert(mutex_is_locked(&host->i_mutex)); ubifs_removexattr() 557 xent_key_init(c, &key, host->i_ino, &nm); ubifs_removexattr() 573 err = remove_xattr(c, host, inode, &nm); ubifs_removexattr()
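ubifs_setxattr() and ubifs_getxattr() above are the filesystem's handlers for the generic setxattr(2)/getxattr(2) system calls, with the in-code comments explaining why the xattr inode has to reach the journal before its host inode. For orientation, the userspace view of the same operation looks roughly like this (the mount point, file name and attribute name are made up):

    #include <stdio.h>
    #include <sys/xattr.h>

    int main(void)
    {
            char buf[64];
            ssize_t len;

            /* Create or replace an extended attribute on the host file. */
            if (setxattr("/mnt/ubifs/file", "user.comment", "hello", 5, 0) != 0) {
                    perror("setxattr");
                    return 1;
            }

            /* Read it back; the kernel paths listed above serve this call. */
            len = getxattr("/mnt/ubifs/file", "user.comment", buf, sizeof(buf));
            if (len < 0) {
                    perror("getxattr");
                    return 1;
            }
            printf("user.comment = %.*s\n", (int)len, buf);
            return 0;
    }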
|
/linux-4.1.27/include/linux/usb/ |
H A D | otg.h | 5 * (for either host or peripheral roles) don't use these calls; they 21 struct usb_bus *host; member in struct:usb_otg 26 /* bind/unbind the host controller */ 27 int (*set_host)(struct usb_otg *otg, struct usb_bus *host); 68 otg_set_host(struct usb_otg *otg, struct usb_bus *host) otg_set_host() argument 71 return otg->set_host(otg, host); otg_set_host()
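otg.h above defines the binding hook: a host controller driver hands its usb_bus to the OTG transceiver through otg_set_host(), and unbinds by passing NULL. A hedged sketch of typical host-side usage, assuming the driver already holds a usb_phy with an otg member (names and error handling are simplified):

    #include <linux/errno.h>
    #include <linux/usb/hcd.h>
    #include <linux/usb/otg.h>

    /* Bind: &hcd->self is the usb_bus belonging to this host controller. */
    static int example_bind_host(struct usb_hcd *hcd, struct usb_phy *phy)
    {
            if (!phy || !phy->otg)
                    return -ENODEV;
            return otg_set_host(phy->otg, &hcd->self);
    }

    /* Unbind on teardown: a NULL host tells the OTG core the bus is gone. */
    static void example_unbind_host(struct usb_phy *phy)
    {
            if (phy && phy->otg)
                    otg_set_host(phy->otg, NULL);
    }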
|
/linux-4.1.27/drivers/scsi/fcoe/ |
H A D | libfcoe.h | 24 pr_info("host%d: fip: " fmt, \ 25 (fip)->lp->host->host_no, ##args);)
|
/linux-4.1.27/tools/usb/usbip/src/ |
H A D | usbip_attach.c | 38 " -r, --remote=<host> The machine with exported USB devices\n" 39 " -b, --busid=<busid> Busid of the device on <host>\n"; 47 static int record_connection(char *host, char *port, char *busid, int rhport) record_connection() argument 76 host, port, busid); record_connection() 172 static int attach_device(char *host, char *busid) attach_device() argument 178 sockfd = usbip_net_tcp_connect(host, usbip_port_string); attach_device() 192 rc = record_connection(host, usbip_port_string, busid, rhport); attach_device() 208 char *host = NULL; usbip_attach() local 221 host = optarg; usbip_attach() 231 if (!host || !busid) usbip_attach() 234 ret = attach_device(host, busid); usbip_attach()
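For context, the attach path above is what runs when the client-side tool is invoked as, for example, usbip attach -r 192.168.0.10 -b 1-1, after the device was exported on the remote machine with usbip bind -b 1-1 and usbipd is listening there (TCP port 3240 by default); the address and busid here are purely illustrative.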
|
/linux-4.1.27/drivers/scsi/pcmcia/ |
H A D | nsp_cs.c | 3 NinjaSCSI-3 / NinjaSCSI-32Bi PCMCIA SCSI host adapter card driver 57 MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module"); 180 nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_scsi_done() 191 /*unsigned int host_id = SCpnt->device->host->this_id;*/ nsp_queuecommand_lck() 192 /*unsigned int base = SCpnt->device->host->io_port;*/ nsp_queuecommand_lck() 195 nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_queuecommand_lck() 367 unsigned int host_id = SCpnt->device->host->this_id; nsphw_start_selection() 368 unsigned int base = SCpnt->device->host->io_port; nsphw_start_selection() 370 nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsphw_start_selection() 450 nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_analyze_sdtr() 506 unsigned int base = SCpnt->device->host->io_port; nsp_start_timer() 507 nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_start_timer() 520 unsigned int base = SCpnt->device->host->io_port; nsp_negate_signal() 549 unsigned int base = SCpnt->device->host->io_port; nsp_expect_signal() 582 unsigned int base = SCpnt->device->host->io_port; nsp_xfer() 583 nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_xfer() 622 nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_dataphase_bypass() 654 unsigned int base = SCpnt->device->host->io_port; nsp_reselected() 655 unsigned int host_id = SCpnt->device->host->this_id; nsp_reselected() 656 //nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_reselected() 693 unsigned int base = SCpnt->device->host->io_port; nsp_fifo_count() 720 unsigned int base = SCpnt->device->host->io_port; nsp_pio_read() 721 unsigned long mmio_base = SCpnt->device->host->base; nsp_pio_read() 722 nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_pio_read() 819 unsigned int base = SCpnt->device->host->io_port; nsp_pio_write() 820 unsigned long mmio_base = SCpnt->device->host->base; nsp_pio_write() 821 nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_pio_write() 916 unsigned int base = SCpnt->device->host->io_port; nsp_nexus() 919 nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_nexus() 968 //nsp_dbg(NSP_DEBUG_INTR, "host=0x%p", ((scsi_info_t *)dev_id)->host); nspintr() 971 ((scsi_info_t *)dev_id)->host != NULL ) { nspintr() 974 data = (nsp_hw_data *)info->host->hostdata; nspintr() 976 nsp_dbg(NSP_DEBUG_INTR, "host data wrong"); nspintr() 1316 struct Scsi_Host *host; /* registered host structure */ nsp_detect() local 1320 host = scsi_host_alloc(&nsp_driver_template, sizeof(nsp_hw_data)); nsp_detect() 1321 if (host == NULL) { nsp_detect() 1322 nsp_dbg(NSP_DEBUG_INIT, "host failed"); nsp_detect() 1326 memcpy(host->hostdata, data_b, sizeof(nsp_hw_data)); nsp_detect() 1327 data = (nsp_hw_data *)host->hostdata; nsp_detect() 1328 data->ScsiInfo->host = host; nsp_detect() 1333 nsp_dbg(NSP_DEBUG_INIT, "irq=%d,%d", data_b->IrqNumber, ((nsp_hw_data *)host->hostdata)->IrqNumber); nsp_detect() 1335 host->unique_id = data->BaseAddress; nsp_detect() 1336 host->io_port = data->BaseAddress; nsp_detect() 1337 host->n_io_port = data->NumAddress; nsp_detect() 1338 host->irq = data->IrqNumber; nsp_detect() 1339 host->base = data->MmioAddress; nsp_detect() 1346 host->io_port, host->io_port + host->n_io_port - 1, nsp_detect() 1347 host->base, nsp_detect() 1348 host->irq); nsp_detect() 1354 return host; /* 
detect done. */ nsp_detect() 1367 static int nsp_show_info(struct seq_file *m, struct Scsi_Host *host) nsp_show_info() argument 1375 hostno = host->host_no; nsp_show_info() 1376 data = (nsp_hw_data *)host->hostdata; nsp_show_info() 1380 seq_printf(m, "SCSI host No.: %d\n", hostno); nsp_show_info() 1381 seq_printf(m, "IRQ: %d\n", host->irq); nsp_show_info() 1382 seq_printf(m, "IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1); nsp_show_info() 1383 seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1); nsp_show_info() 1384 seq_printf(m, "sg_tablesize: %d\n", host->sg_tablesize); nsp_show_info() 1413 if (id == host->this_id) { nsp_show_info() 1414 seq_puts(m, "----- NinjaSCSI-3 host adapter\n"); nsp_show_info() 1482 nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_eh_bus_reset() 1491 nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_eh_host_reset() 1582 struct Scsi_Host *host; nsp_cs_config() local 1625 host = nsp_detect(&nsp_driver_template); nsp_cs_config() 1627 if (host == NULL) { nsp_cs_config() 1633 ret = scsi_add_host (host, NULL); nsp_cs_config() 1637 scsi_scan_host(host); nsp_cs_config() 1639 info->host = host; nsp_cs_config() 1656 if (info->host == NULL) { nsp_cs_release() 1659 data = (nsp_hw_data *)info->host->hostdata; nsp_cs_release() 1665 if (info->host != NULL) { nsp_cs_release() 1666 scsi_remove_host(info->host); nsp_cs_release() 1676 if (info->host != NULL) { nsp_cs_release() 1677 scsi_host_put(info->host); nsp_cs_release() 1688 if (info->host != NULL) { nsp_cs_suspend() 1691 data = (nsp_hw_data *)info->host->hostdata; nsp_cs_suspend() 1710 if (info->host != NULL) { nsp_cs_resume() 1711 nsp_msg(KERN_INFO, "reset host and bus"); nsp_cs_resume() 1713 data = (nsp_hw_data *)info->host->hostdata; nsp_cs_resume()
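nsp_detect() and nsp_cs_release() above walk the complete Scsi_Host lifecycle: allocate with scsi_host_alloc(), describe the adapter's resources, register with scsi_add_host(), probe with scsi_scan_host(), and on teardown call scsi_remove_host() before dropping the last reference with scsi_host_put(). A condensed sketch of that ordering (the template and resource values are placeholders, not the NinjaSCSI driver's):

    #include <scsi/scsi_host.h>

    static struct scsi_host_template example2_sht = {
            .name = "example2",
    };

    static struct Scsi_Host *example2_register(struct device *dev)
    {
            struct Scsi_Host *host;

            host = scsi_host_alloc(&example2_sht, 0);
            if (!host)
                    return NULL;

            host->io_port = 0x300;  /* placeholder resources */
            host->irq     = 10;

            if (scsi_add_host(host, dev)) {
                    scsi_host_put(host);
                    return NULL;
            }
            scsi_scan_host(host);   /* look for attached devices */
            return host;
    }

    static void example2_unregister(struct Scsi_Host *host)
    {
            scsi_remove_host(host); /* detach from the midlayer first */
            scsi_host_put(host);    /* then drop the allocation reference */
    }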
|
/linux-4.1.27/drivers/usb/chipidea/ |
H A D | Makefile | 7 ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST) += host.o
|
/linux-4.1.27/drivers/usb/host/ |
H A D | xhci-trace.c | 2 * xHCI host controller driver
|