Lines Matching refs:host

225 struct mmci_host *host = mmc_priv(mmc); in mmci_card_busy() local
231 spin_lock_irqsave(&host->lock, flags); in mmci_card_busy()
232 if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY) in mmci_card_busy()
234 spin_unlock_irqrestore(&host->lock, flags); in mmci_card_busy()
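Lines 231-234 above contain the whole busy check apart from the return path: a single status read under the host lock, testing the ST-variant card-busy bit (DAT0 held low by the card). Reconstructed as a sketch, using the register and bit names from mmci.h:

    /* sketch of mmci_card_busy(); the lock keeps the read coherent
     * with concurrent register writes from the IRQ path */
    static int card_busy_sketch(struct mmc_host *mmc)
    {
            struct mmci_host *host = mmc_priv(mmc);
            unsigned long flags;
            int busy = 0;

            spin_lock_irqsave(&host->lock, flags);
            if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
                    busy = 1;
            spin_unlock_irqrestore(&host->lock, flags);

            return busy;
    }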
245 static int mmci_validate_data(struct mmci_host *host, in mmci_validate_data() argument
252 dev_err(mmc_dev(host->mmc), in mmci_validate_data()
260 static void mmci_reg_delay(struct mmci_host *host) in mmci_reg_delay() argument
269 if (host->cclk < 25000000) in mmci_reg_delay()
278 static void mmci_write_clkreg(struct mmci_host *host, u32 clk) in mmci_write_clkreg() argument
280 if (host->clk_reg != clk) { in mmci_write_clkreg()
281 host->clk_reg = clk; in mmci_write_clkreg()
282 writel(clk, host->base + MMCICLOCK); in mmci_write_clkreg()
289 static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr) in mmci_write_pwrreg() argument
291 if (host->pwr_reg != pwr) { in mmci_write_pwrreg()
292 host->pwr_reg = pwr; in mmci_write_pwrreg()
293 writel(pwr, host->base + MMCIPOWER); in mmci_write_pwrreg()
300 static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl) in mmci_write_datactrlreg() argument
303 datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE; in mmci_write_datactrlreg()
305 if (host->datactrl_reg != datactrl) { in mmci_write_datactrlreg()
306 host->datactrl_reg = datactrl; in mmci_write_datactrlreg()
307 writel(datactrl, host->base + MMCIDATACTRL); in mmci_write_datactrlreg()
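Lines 280-307 are three instances of one idiom: shadow the last value written and skip the MMIO access when nothing changed. The datactrl writer additionally carries the sticky ST busy-mode bit across unrelated updates (line 303). The shared pattern, as a minimal sketch with a hypothetical helper name:

    /* shadowed register write: the shadow both avoids redundant bus
     * accesses and doubles as the state that mmci_save()/mmci_restore()
     * replay around a power gate (see lines 1807-1809 below) */
    static void write_shadowed(u32 *shadow, u32 val, void __iomem *reg)
    {
            if (*shadow != val) {
                    *shadow = val;
                    writel(val, reg);
            }
    }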
314 static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) in mmci_set_clkreg() argument
316 struct variant_data *variant = host->variant; in mmci_set_clkreg()
320 host->cclk = 0; in mmci_set_clkreg()
324 host->cclk = host->mclk; in mmci_set_clkreg()
325 } else if (desired >= host->mclk) { in mmci_set_clkreg()
329 host->cclk = host->mclk; in mmci_set_clkreg()
337 clk = DIV_ROUND_UP(host->mclk, desired) - 2; in mmci_set_clkreg()
340 host->cclk = host->mclk / (clk + 2); in mmci_set_clkreg()
346 clk = host->mclk / (2 * desired) - 1; in mmci_set_clkreg()
349 host->cclk = host->mclk / (2 * (clk + 1)); in mmci_set_clkreg()
359 host->mmc->actual_clock = host->cclk; in mmci_set_clkreg()
361 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) in mmci_set_clkreg()
363 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) in mmci_set_clkreg()
366 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || in mmci_set_clkreg()
367 host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) in mmci_set_clkreg()
370 mmci_write_clkreg(host, clk); in mmci_set_clkreg()
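Lines 337/340 and 346/349 are the two divider schemes: ST variants carry a dedicated divider with f = mclk / (div + 2), while the classic PL180 uses f = mclk / (2 * (div + 1)). A standalone model of the arithmetic (register-width clamping and the bypass case of lines 324-329 elided; DIV_ROUND_UP as in linux/kernel.h):

    /* ST: rounding the divisor up guarantees cclk <= desired,
     * i.e. the card is never overclocked */
    static unsigned int st_cclk_model(unsigned int mclk, unsigned int desired)
    {
            unsigned int div = DIV_ROUND_UP(mclk, desired) - 2;

            return mclk / (div + 2);
    }

    /* classic PL180, per the TRM: clkdiv = mclk / (2 * f) - 1; the
     * truncating division means the result can land at or slightly
     * above the request */
    static unsigned int pl180_cclk_model(unsigned int mclk, unsigned int desired)
    {
            unsigned int div = mclk / (2 * desired) - 1;

            return mclk / (2 * (div + 1));
    }

Whichever scheme applies, the achieved rate lands in host->cclk and is reported to the core via mmc->actual_clock (line 359).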
374 mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) in mmci_request_end() argument
376 writel(0, host->base + MMCICOMMAND); in mmci_request_end()
378 BUG_ON(host->data); in mmci_request_end()
380 host->mrq = NULL; in mmci_request_end()
381 host->cmd = NULL; in mmci_request_end()
383 mmc_request_done(host->mmc, mrq); in mmci_request_end()
385 pm_runtime_mark_last_busy(mmc_dev(host->mmc)); in mmci_request_end()
386 pm_runtime_put_autosuspend(mmc_dev(host->mmc)); in mmci_request_end()
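Lines 376-386 show the complete shape of request completion: quiesce the command path, drop the request pointers, notify the core, then release the runtime-PM reference with autosuspend. Reassembled (assuming, as in mainline, that the matching pm_runtime_get_sync() was taken when the request was queued):

    static void request_end_sketch(struct mmci_host *host,
                                   struct mmc_request *mrq)
    {
            writel(0, host->base + MMCICOMMAND);    /* idle the CPSM */

            host->mrq = NULL;
            host->cmd = NULL;

            mmc_request_done(host->mmc, mrq);

            /* autosuspend defers the actual power-down, so back-to-back
             * requests do not bounce the controller off and on */
            pm_runtime_mark_last_busy(mmc_dev(host->mmc));
            pm_runtime_put_autosuspend(mmc_dev(host->mmc));
    }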
389 static void mmci_set_mask1(struct mmci_host *host, unsigned int mask) in mmci_set_mask1() argument
391 void __iomem *base = host->base; in mmci_set_mask1()
393 if (host->singleirq) { in mmci_set_mask1()
405 static void mmci_stop_data(struct mmci_host *host) in mmci_stop_data() argument
407 mmci_write_datactrlreg(host, 0); in mmci_stop_data()
408 mmci_set_mask1(host, 0); in mmci_stop_data()
409 host->data = NULL; in mmci_stop_data()
412 static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) in mmci_init_sg() argument
421 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); in mmci_init_sg()
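The body elided between lines 412 and 421 only picks the miter flags; assuming it matches mainline, it reads:

    static void init_sg_sketch(struct mmci_host *host, struct mmc_data *data)
    {
            /* ATOMIC because the miter is advanced from interrupt
             * context in mmci_pio_irq(); direction follows the data */
            unsigned int flags = SG_MITER_ATOMIC;

            if (data->flags & MMC_DATA_READ)
                    flags |= SG_MITER_TO_SG;        /* PIO fills the sg */
            else
                    flags |= SG_MITER_FROM_SG;      /* PIO drains the sg */

            sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
    }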
430 static void mmci_dma_setup(struct mmci_host *host) in mmci_dma_setup() argument
433 struct variant_data *variant = host->variant; in mmci_dma_setup()
435 host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx"); in mmci_dma_setup()
436 host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx"); in mmci_dma_setup()
439 host->next_data.cookie = 1; in mmci_dma_setup()
446 if (host->dma_rx_channel && !host->dma_tx_channel) in mmci_dma_setup()
447 host->dma_tx_channel = host->dma_rx_channel; in mmci_dma_setup()
449 if (host->dma_rx_channel) in mmci_dma_setup()
450 rxname = dma_chan_name(host->dma_rx_channel); in mmci_dma_setup()
454 if (host->dma_tx_channel) in mmci_dma_setup()
455 txname = dma_chan_name(host->dma_tx_channel); in mmci_dma_setup()
459 dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n", in mmci_dma_setup()
466 if (host->dma_tx_channel) { in mmci_dma_setup()
467 struct device *dev = host->dma_tx_channel->device->dev; in mmci_dma_setup()
470 if (max_seg_size < host->mmc->max_seg_size) in mmci_dma_setup()
471 host->mmc->max_seg_size = max_seg_size; in mmci_dma_setup()
473 if (host->dma_rx_channel) { in mmci_dma_setup()
474 struct device *dev = host->dma_rx_channel->device->dev; in mmci_dma_setup()
477 if (max_seg_size < host->mmc->max_seg_size) in mmci_dma_setup()
478 host->mmc->max_seg_size = max_seg_size; in mmci_dma_setup()
481 if (variant->qcom_dml && host->dma_rx_channel && host->dma_tx_channel) in mmci_dma_setup()
482 if (dml_hw_init(host, host->mmc->parent->of_node)) in mmci_dma_setup()
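Lines 466-478 run the same clamp once per direction: the MMC core must never build a segment larger than the DMA engine can cover in a single transfer. As a helper sketch (hypothetical name; dma_get_max_seg_size() is the real API):

    static void clamp_seg_sketch(struct mmc_host *mmc, struct dma_chan *chan)
    {
            if (chan) {
                    struct device *dev = chan->device->dev;
                    unsigned int max_seg_size = dma_get_max_seg_size(dev);

                    if (max_seg_size < mmc->max_seg_size)
                            mmc->max_seg_size = max_seg_size;
            }
    }

The fallback at lines 446-447 (tx reuses rx) covers controllers wired with a single bidirectional channel.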
490 static inline void mmci_dma_release(struct mmci_host *host) in mmci_dma_release() argument
492 if (host->dma_rx_channel) in mmci_dma_release()
493 dma_release_channel(host->dma_rx_channel); in mmci_dma_release()
494 if (host->dma_tx_channel) in mmci_dma_release()
495 dma_release_channel(host->dma_tx_channel); in mmci_dma_release()
496 host->dma_rx_channel = host->dma_tx_channel = NULL; in mmci_dma_release()
499 static void mmci_dma_data_error(struct mmci_host *host) in mmci_dma_data_error() argument
501 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); in mmci_dma_data_error()
502 dmaengine_terminate_all(host->dma_current); in mmci_dma_data_error()
503 host->dma_current = NULL; in mmci_dma_data_error()
504 host->dma_desc_current = NULL; in mmci_dma_data_error()
505 host->data->host_cookie = 0; in mmci_dma_data_error()
508 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) in mmci_dma_unmap() argument
515 chan = host->dma_rx_channel; in mmci_dma_unmap()
518 chan = host->dma_tx_channel; in mmci_dma_unmap()
524 static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data) in mmci_dma_finalize() argument
531 status = readl(host->base + MMCISTATUS); in mmci_dma_finalize()
544 mmci_dma_data_error(host); in mmci_dma_finalize()
550 mmci_dma_unmap(host, data); in mmci_dma_finalize()
557 dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n"); in mmci_dma_finalize()
558 mmci_dma_release(host); in mmci_dma_finalize()
561 host->dma_current = NULL; in mmci_dma_finalize()
562 host->dma_desc_current = NULL; in mmci_dma_finalize()
566 static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, in __mmci_dma_prep_data() argument
570 struct variant_data *variant = host->variant; in __mmci_dma_prep_data()
572 .src_addr = host->phybase + MMCIFIFO, in __mmci_dma_prep_data()
573 .dst_addr = host->phybase + MMCIFIFO, in __mmci_dma_prep_data()
590 chan = host->dma_rx_channel; in __mmci_dma_prep_data()
594 chan = host->dma_tx_channel; in __mmci_dma_prep_data()
610 if (host->variant->qcom_dml) in __mmci_dma_prep_data()
629 static inline int mmci_dma_prep_data(struct mmci_host *host, in mmci_dma_prep_data() argument
633 if (host->dma_current && host->dma_desc_current) in mmci_dma_prep_data()
637 return __mmci_dma_prep_data(host, data, &host->dma_current, in mmci_dma_prep_data()
638 &host->dma_desc_current); in mmci_dma_prep_data()
641 static inline int mmci_dma_prep_next(struct mmci_host *host, in mmci_dma_prep_next() argument
644 struct mmci_host_next *nd = &host->next_data; in mmci_dma_prep_next()
645 return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc); in mmci_dma_prep_next()
648 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) in mmci_dma_start_data() argument
651 struct mmc_data *data = host->data; in mmci_dma_start_data()
653 ret = mmci_dma_prep_data(host, host->data); in mmci_dma_start_data()
658 dev_vdbg(mmc_dev(host->mmc), in mmci_dma_start_data()
661 dmaengine_submit(host->dma_desc_current); in mmci_dma_start_data()
662 dma_async_issue_pending(host->dma_current); in mmci_dma_start_data()
664 if (host->variant->qcom_dml) in mmci_dma_start_data()
665 dml_start_xfer(host, data); in mmci_dma_start_data()
670 mmci_write_datactrlreg(host, datactrl); in mmci_dma_start_data()
677 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, in mmci_dma_start_data()
678 host->base + MMCIMASK0); in mmci_dma_start_data()
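Lines 653-678 are the canonical dmaengine start sequence. A condensed sketch, with error handling elided and MCI_DPSM_DMAENABLE ORed in here although mainline sets it a few lines earlier:

    static void dma_start_sketch(struct mmci_host *host, u32 datactrl)
    {
            /* descriptor was built beforehand by the prep path */
            dmaengine_submit(host->dma_desc_current);
            dma_async_issue_pending(host->dma_current);

            /* hand the data path to DMA ... */
            mmci_write_datactrlreg(host, datactrl | MCI_DPSM_DMAENABLE);

            /* ... and unmask only DATAEND: the DMA engine, not the
             * FIFO level, now paces the transfer */
            writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
                   host->base + MMCIMASK0);
    }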
682 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) in mmci_get_next_data() argument
684 struct mmci_host_next *next = &host->next_data; in mmci_get_next_data()
689 host->dma_desc_current = next->dma_desc; in mmci_get_next_data()
690 host->dma_current = next->dma_chan; in mmci_get_next_data()
698 struct mmci_host *host = mmc_priv(mmc); in mmci_pre_request() local
700 struct mmci_host_next *nd = &host->next_data; in mmci_pre_request()
707 if (mmci_validate_data(host, data)) in mmci_pre_request()
710 if (!mmci_dma_prep_next(host, data)) in mmci_pre_request()
717 struct mmci_host *host = mmc_priv(mmc); in mmci_post_request() local
723 mmci_dma_unmap(host, data); in mmci_post_request()
726 struct mmci_host_next *next = &host->next_data; in mmci_post_request()
729 chan = host->dma_rx_channel; in mmci_post_request()
731 chan = host->dma_tx_channel; in mmci_post_request()
734 if (host->dma_desc_current == next->dma_desc) in mmci_post_request()
735 host->dma_desc_current = NULL; in mmci_post_request()
737 if (host->dma_current == next->dma_chan) in mmci_post_request()
738 host->dma_current = NULL; in mmci_post_request()
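Lines 684-743 implement double buffering: pre_request() builds the next descriptor while the current transfer still runs and stamps it with a cookie, and the adoption step later takes it over only if the cookie matches. That step, reconstructed from lines 684-690 (the WARN_ON is assumed from mainline):

    static void get_next_data_sketch(struct mmci_host *host,
                                     struct mmc_data *data)
    {
            struct mmci_host_next *next = &host->next_data;

            /* the cached descriptor must have been built for exactly
             * this request */
            WARN_ON(data->host_cookie && data->host_cookie != next->cookie);

            host->dma_desc_current = next->dma_desc;
            host->dma_current = next->dma_chan;
            next->dma_desc = NULL;
            next->dma_chan = NULL;
    }

post_request() is the error-path mirror: on failure it terminates the channel and clears any current pointers still referring to the cached descriptor (lines 734-738), so a stale descriptor can never be resubmitted.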
748 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) in mmci_get_next_data() argument
751 static inline void mmci_dma_setup(struct mmci_host *host) in mmci_dma_setup() argument
755 static inline void mmci_dma_release(struct mmci_host *host) in mmci_dma_release() argument
759 static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) in mmci_dma_unmap() argument
763 static inline void mmci_dma_finalize(struct mmci_host *host, in mmci_dma_finalize() argument
768 static inline void mmci_dma_data_error(struct mmci_host *host) in mmci_dma_data_error() argument
772 static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) in mmci_dma_start_data() argument
782 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) in mmci_start_data() argument
784 struct variant_data *variant = host->variant; in mmci_start_data()
790 dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n", in mmci_start_data()
793 host->data = data; in mmci_start_data()
794 host->size = data->blksz * data->blocks; in mmci_start_data()
797 clks = (unsigned long long)data->timeout_ns * host->cclk; in mmci_start_data()
802 base = host->base; in mmci_start_data()
804 writel(host->size, base + MMCIDATALENGTH); in mmci_start_data()
819 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) { in mmci_start_data()
831 (host->size < 8 || in mmci_start_data()
832 (host->size <= 8 && host->mclk > 50000000))) in mmci_start_data()
833 clk = host->clk_reg & ~variant->clkreg_enable; in mmci_start_data()
835 clk = host->clk_reg | variant->clkreg_enable; in mmci_start_data()
837 mmci_write_clkreg(host, clk); in mmci_start_data()
840 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || in mmci_start_data()
841 host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) in mmci_start_data()
848 if (!mmci_dma_start_data(host, datactrl)) in mmci_start_data()
852 mmci_init_sg(host, data); in mmci_start_data()
862 if (host->size < variant->fifohalfsize) in mmci_start_data()
872 mmci_write_datactrlreg(host, datactrl); in mmci_start_data()
874 mmci_set_mask1(host, irqmask); in mmci_start_data()
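Line 797 is the first half of the data-timeout calculation; the elided remainder divides the nanosecond budget by a second's worth of nanoseconds and adds the card's own cycle count before programming the timer. Assuming the mainline sequence:

    static void program_data_timer_sketch(struct mmci_host *host,
                                          struct mmc_data *data)
    {
            /* ns * Hz overflows 32 bits almost immediately, hence the
             * 64-bit product and do_div() */
            u64 clks = (unsigned long long)data->timeout_ns * host->cclk;
            unsigned int timeout;

            do_div(clks, NSEC_PER_SEC);
            timeout = data->timeout_clks + (unsigned int)clks;

            writel(timeout, host->base + MMCIDATATIMER);
    }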
878 mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) in mmci_start_command() argument
880 void __iomem *base = host->base; in mmci_start_command()
882 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n", in mmci_start_command()
887 mmci_reg_delay(host); in mmci_start_command()
900 c |= host->variant->data_cmd_enable; in mmci_start_command()
902 host->cmd = cmd; in mmci_start_command()
909 mmci_data_irq(struct mmci_host *host, struct mmc_data *data, in mmci_data_irq() argument
922 if (dma_inprogress(host)) { in mmci_data_irq()
923 mmci_dma_data_error(host); in mmci_data_irq()
924 mmci_dma_unmap(host, data); in mmci_data_irq()
934 remain = readl(host->base + MMCIDATACNT); in mmci_data_irq()
937 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", in mmci_data_irq()
950 if (success > host->variant->fifosize) in mmci_data_irq()
951 success -= host->variant->fifosize; in mmci_data_irq()
960 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n"); in mmci_data_irq()
963 if (dma_inprogress(host)) in mmci_data_irq()
964 mmci_dma_finalize(host, data); in mmci_data_irq()
965 mmci_stop_data(host); in mmci_data_irq()
971 if (!data->stop || host->mrq->sbc) { in mmci_data_irq()
972 mmci_request_end(host, data->mrq); in mmci_data_irq()
974 mmci_start_command(host, data->stop, 0); in mmci_data_irq()
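Lines 934-951 recover how much data genuinely landed when a transfer errors out. MMCIDATACNT counts bytes on the card bus, which on a read can run a full FIFO ahead of what was drained to memory, hence the fifosize correction on overrun. Sketched, with the per-error errno assignment elided:

    static void data_error_accounting_sketch(struct mmci_host *host,
                                             struct mmc_data *data, u32 status)
    {
            u32 remain, success;

            remain = readl(host->base + MMCIDATACNT);
            success = data->blksz * data->blocks - remain;

            if (status & MCI_RXOVERRUN) {
                    /* a FIFO-worth may never have reached memory */
                    if (success > host->variant->fifosize)
                            success -= host->variant->fifosize;
                    else
                            success = 0;
            }

            /* only whole blocks count as transferred */
            data->bytes_xfered = round_down(success, data->blksz);
    }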
980 mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, in mmci_cmd_irq() argument
983 void __iomem *base = host->base; in mmci_cmd_irq()
989 sbc = (cmd == host->mrq->sbc); in mmci_cmd_irq()
990 busy_resp = host->variant->busy_detect && (cmd->flags & MMC_RSP_BUSY); in mmci_cmd_irq()
992 if (!((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT| in mmci_cmd_irq()
997 if (host->busy_status && (status & MCI_ST_CARDBUSY)) in mmci_cmd_irq()
1001 if (!host->busy_status && busy_resp && in mmci_cmd_irq()
1006 host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND); in mmci_cmd_irq()
1011 if (host->busy_status) { in mmci_cmd_irq()
1014 host->busy_status = 0; in mmci_cmd_irq()
1017 host->cmd = NULL; in mmci_cmd_irq()
1031 if (host->data) { in mmci_cmd_irq()
1033 if (dma_inprogress(host)) { in mmci_cmd_irq()
1034 mmci_dma_data_error(host); in mmci_cmd_irq()
1035 mmci_dma_unmap(host, host->data); in mmci_cmd_irq()
1037 mmci_stop_data(host); in mmci_cmd_irq()
1039 mmci_request_end(host, host->mrq); in mmci_cmd_irq()
1041 mmci_start_command(host, host->mrq->cmd, 0); in mmci_cmd_irq()
1043 mmci_start_data(host, cmd->data); in mmci_cmd_irq()
1047 static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain) in mmci_get_rx_fifocnt() argument
1049 return remain - (readl(host->base + MMCIFIFOCNT) << 2); in mmci_get_rx_fifocnt()
1052 static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r) in mmci_qcom_get_rx_fifocnt() argument
1059 return host->variant->fifohalfsize; in mmci_qcom_get_rx_fifocnt()
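Two implementations of one hook: lines 1047-1049 trust the hardware word counter (FIFOCNT counts words, hence the << 2 to get bytes), while the Qualcomm variant has no usable counter and drains fixed half-FIFO chunks. A sketch of the Qualcomm side, with its end-of-transfer tail case elided:

    static int qcom_rx_fifocnt_sketch(struct mmci_host *host, u32 status,
                                      int remain)
    {
            /* no byte counter on this variant: when the half-full flag
             * is raised, exactly half a FIFO is ready to drain */
            if (status & MCI_RXFIFOHALFFULL)
                    return host->variant->fifohalfsize;

            return 0;
    }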
1066 static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain) in mmci_pio_read() argument
1068 void __iomem *base = host->base; in mmci_pio_read()
1070 u32 status = readl(host->base + MMCISTATUS); in mmci_pio_read()
1071 int host_remain = host->size; in mmci_pio_read()
1074 int count = host->get_rx_fifocnt(host, status, host_remain); in mmci_pio_read()
1114 static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status) in mmci_pio_write() argument
1116 struct variant_data *variant = host->variant; in mmci_pio_write()
1117 void __iomem *base = host->base; in mmci_pio_write()
1154 struct mmci_host *host = dev_id; in mmci_pio_irq() local
1155 struct sg_mapping_iter *sg_miter = &host->sg_miter; in mmci_pio_irq()
1156 struct variant_data *variant = host->variant; in mmci_pio_irq()
1157 void __iomem *base = host->base; in mmci_pio_irq()
1163 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); in mmci_pio_irq()
1189 len = mmci_pio_read(host, buffer, remain); in mmci_pio_irq()
1191 len = mmci_pio_write(host, buffer, remain, status); in mmci_pio_irq()
1195 host->size -= len; in mmci_pio_irq()
1212 if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize) in mmci_pio_irq()
1213 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK); in mmci_pio_irq()
1221 if (host->size == 0) { in mmci_pio_irq()
1222 mmci_set_mask1(host, 0); in mmci_pio_irq()
1234 struct mmci_host *host = dev_id; in mmci_irq() local
1238 spin_lock(&host->lock); in mmci_irq()
1241 status = readl(host->base + MMCISTATUS); in mmci_irq()
1243 if (host->singleirq) { in mmci_irq()
1244 if (status & readl(host->base + MMCIMASK1)) in mmci_irq()
1255 status &= readl(host->base + MMCIMASK0); in mmci_irq()
1256 writel(status, host->base + MMCICLEAR); in mmci_irq()
1258 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); in mmci_irq()
1260 if (host->variant->reversed_irq_handling) { in mmci_irq()
1261 mmci_data_irq(host, host->data, status); in mmci_irq()
1262 mmci_cmd_irq(host, host->cmd, status); in mmci_irq()
1264 mmci_cmd_irq(host, host->cmd, status); in mmci_irq()
1265 mmci_data_irq(host, host->data, status); in mmci_irq()
1269 if (host->busy_status) in mmci_irq()
1275 spin_unlock(&host->lock); in mmci_irq()
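Lines 1238-1275 reduce to the classic read/mask/acknowledge loop, repeated until the controller goes quiet; note from line 1260 that some variants want the data interrupt serviced before the command one. Condensed, with the singleirq folding (lines 1243-1244) and the busy_status special case (line 1269) elided and the return value simplified:

    static irqreturn_t irq_sketch(int irq, void *dev_id)
    {
            struct mmci_host *host = dev_id;
            u32 status;

            spin_lock(&host->lock);

            do {
                    status = readl(host->base + MMCISTATUS);
                    status &= readl(host->base + MMCIMASK0); /* enabled only */
                    writel(status, host->base + MMCICLEAR);  /* write-to-clear */

                    mmci_cmd_irq(host, host->cmd, status);
                    mmci_data_irq(host, host->data, status);
            } while (status);

            spin_unlock(&host->lock);

            return IRQ_HANDLED;
    }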
1282 struct mmci_host *host = mmc_priv(mmc); in mmci_request() local
1285 WARN_ON(host->mrq != NULL); in mmci_request()
1287 mrq->cmd->error = mmci_validate_data(host, mrq->data); in mmci_request()
1295 spin_lock_irqsave(&host->lock, flags); in mmci_request()
1297 host->mrq = mrq; in mmci_request()
1300 mmci_get_next_data(host, mrq->data); in mmci_request()
1303 mmci_start_data(host, mrq->data); in mmci_request()
1306 mmci_start_command(host, mrq->sbc, 0); in mmci_request()
1308 mmci_start_command(host, mrq->cmd, 0); in mmci_request()
1310 spin_unlock_irqrestore(&host->lock, flags); in mmci_request()
1315 struct mmci_host *host = mmc_priv(mmc); in mmci_set_ios() local
1316 struct variant_data *variant = host->variant; in mmci_set_ios()
1323 if (host->plat->ios_handler && in mmci_set_ios()
1324 host->plat->ios_handler(mmc_dev(mmc), ios)) in mmci_set_ios()
1332 if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) { in mmci_set_ios()
1334 host->vqmmc_enabled = false; in mmci_set_ios()
1351 if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) { in mmci_set_ios()
1357 host->vqmmc_enabled = true; in mmci_set_ios()
1370 pwr |= host->pwr_reg_add; in mmci_set_ios()
1381 if (host->hw_designer != AMBA_VENDOR_ST) in mmci_set_ios()
1399 if (host->variant->explicit_mclk_control && in mmci_set_ios()
1400 ios->clock != host->clock_cache) { in mmci_set_ios()
1401 ret = clk_set_rate(host->clk, ios->clock); in mmci_set_ios()
1403 dev_err(mmc_dev(host->mmc), in mmci_set_ios()
1406 host->mclk = clk_get_rate(host->clk); in mmci_set_ios()
1408 host->clock_cache = ios->clock; in mmci_set_ios()
1410 spin_lock_irqsave(&host->lock, flags); in mmci_set_ios()
1412 mmci_set_clkreg(host, ios->clock); in mmci_set_ios()
1413 mmci_write_pwrreg(host, pwr); in mmci_set_ios()
1414 mmci_reg_delay(host); in mmci_set_ios()
1416 spin_unlock_irqrestore(&host->lock, flags); in mmci_set_ios()
1424 struct mmci_host *host = mmc_priv(mmc); in mmci_get_cd() local
1425 struct mmci_platform_data *plat = host->plat; in mmci_get_cd()
1432 status = plat->status(mmc_dev(host->mmc)); in mmci_get_cd()
1482 struct mmci_host *host = mmc_priv(mmc); in mmci_of_parse() local
1489 host->pwr_reg_add |= MCI_ST_DATA0DIREN; in mmci_of_parse()
1491 host->pwr_reg_add |= MCI_ST_DATA2DIREN; in mmci_of_parse()
1493 host->pwr_reg_add |= MCI_ST_DATA31DIREN; in mmci_of_parse()
1495 host->pwr_reg_add |= MCI_ST_DATA74DIREN; in mmci_of_parse()
1497 host->pwr_reg_add |= MCI_ST_CMDDIREN; in mmci_of_parse()
1499 host->pwr_reg_add |= MCI_ST_FBCLKEN; in mmci_of_parse()
1515 struct mmci_host *host; in mmci_probe() local
1539 host = mmc_priv(mmc); in mmci_probe()
1540 host->mmc = mmc; in mmci_probe()
1542 host->hw_designer = amba_manf(dev); in mmci_probe()
1543 host->hw_revision = amba_rev(dev); in mmci_probe()
1544 dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer); in mmci_probe()
1545 dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision); in mmci_probe()
1547 host->clk = devm_clk_get(&dev->dev, NULL); in mmci_probe()
1548 if (IS_ERR(host->clk)) { in mmci_probe()
1549 ret = PTR_ERR(host->clk); in mmci_probe()
1553 ret = clk_prepare_enable(host->clk); in mmci_probe()
1558 host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt; in mmci_probe()
1560 host->get_rx_fifocnt = mmci_get_rx_fifocnt; in mmci_probe()
1562 host->plat = plat; in mmci_probe()
1563 host->variant = variant; in mmci_probe()
1564 host->mclk = clk_get_rate(host->clk); in mmci_probe()
1570 if (host->mclk > variant->f_max) { in mmci_probe()
1571 ret = clk_set_rate(host->clk, variant->f_max); in mmci_probe()
1574 host->mclk = clk_get_rate(host->clk); in mmci_probe()
1576 host->mclk); in mmci_probe()
1579 host->phybase = dev->res.start; in mmci_probe()
1580 host->base = devm_ioremap_resource(&dev->dev, &dev->res); in mmci_probe()
1581 if (IS_ERR(host->base)) { in mmci_probe()
1582 ret = PTR_ERR(host->base); in mmci_probe()
1593 mmc->f_min = DIV_ROUND_UP(host->mclk, 257); in mmci_probe()
1595 mmc->f_min = clk_round_rate(host->clk, 100000); in mmci_probe()
1597 mmc->f_min = DIV_ROUND_UP(host->mclk, 512); in mmci_probe()
1607 min(host->mclk, mmc->f_max); in mmci_probe()
1610 fmax : min(host->mclk, fmax); in mmci_probe()
1637 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE); in mmci_probe()
1676 spin_lock_init(&host->lock); in mmci_probe()
1678 writel(0, host->base + MMCIMASK0); in mmci_probe()
1679 writel(0, host->base + MMCIMASK1); in mmci_probe()
1680 writel(0xfff, host->base + MMCICLEAR); in mmci_probe()
1714 DRIVER_NAME " (cmd)", host); in mmci_probe()
1719 host->singleirq = true; in mmci_probe()
1722 IRQF_SHARED, DRIVER_NAME " (pio)", host); in mmci_probe()
1727 writel(MCI_IRQENABLE, host->base + MMCIMASK0); in mmci_probe()
1736 mmci_dma_setup(host); in mmci_probe()
1747 clk_disable_unprepare(host->clk); in mmci_probe()
1758 struct mmci_host *host = mmc_priv(mmc); in mmci_remove() local
1768 writel(0, host->base + MMCIMASK0); in mmci_remove()
1769 writel(0, host->base + MMCIMASK1); in mmci_remove()
1771 writel(0, host->base + MMCICOMMAND); in mmci_remove()
1772 writel(0, host->base + MMCIDATACTRL); in mmci_remove()
1774 mmci_dma_release(host); in mmci_remove()
1775 clk_disable_unprepare(host->clk); in mmci_remove()
1783 static void mmci_save(struct mmci_host *host) in mmci_save() argument
1787 spin_lock_irqsave(&host->lock, flags); in mmci_save()
1789 writel(0, host->base + MMCIMASK0); in mmci_save()
1790 if (host->variant->pwrreg_nopower) { in mmci_save()
1791 writel(0, host->base + MMCIDATACTRL); in mmci_save()
1792 writel(0, host->base + MMCIPOWER); in mmci_save()
1793 writel(0, host->base + MMCICLOCK); in mmci_save()
1795 mmci_reg_delay(host); in mmci_save()
1797 spin_unlock_irqrestore(&host->lock, flags); in mmci_save()
1800 static void mmci_restore(struct mmci_host *host) in mmci_restore() argument
1804 spin_lock_irqsave(&host->lock, flags); in mmci_restore()
1806 if (host->variant->pwrreg_nopower) { in mmci_restore()
1807 writel(host->clk_reg, host->base + MMCICLOCK); in mmci_restore()
1808 writel(host->datactrl_reg, host->base + MMCIDATACTRL); in mmci_restore()
1809 writel(host->pwr_reg, host->base + MMCIPOWER); in mmci_restore()
1811 writel(MCI_IRQENABLE, host->base + MMCIMASK0); in mmci_restore()
1812 mmci_reg_delay(host); in mmci_restore()
1814 spin_unlock_irqrestore(&host->lock, flags); in mmci_restore()
1823 struct mmci_host *host = mmc_priv(mmc); in mmci_runtime_suspend() local
1825 mmci_save(host); in mmci_runtime_suspend()
1826 clk_disable_unprepare(host->clk); in mmci_runtime_suspend()
1838 struct mmci_host *host = mmc_priv(mmc); in mmci_runtime_resume() local
1839 clk_prepare_enable(host->clk); in mmci_runtime_resume()
1840 mmci_restore(host); in mmci_runtime_resume()