/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

#define SPI_CFG0_REG			0x0000
#define SPI_CFG1_REG			0x0004
#define SPI_TX_SRC_REG			0x0008
#define SPI_RX_DST_REG			0x000c
#define SPI_TX_DATA_REG			0x0010
#define SPI_RX_DATA_REG			0x0014
#define SPI_CMD_REG			0x0018
#define SPI_STATUS0_REG			0x001c
#define SPI_PAD_SEL_REG			0x0024

#define SPI_CFG0_SCK_HIGH_OFFSET	0
#define SPI_CFG0_SCK_LOW_OFFSET		8
#define SPI_CFG0_CS_HOLD_OFFSET		16
#define SPI_CFG0_CS_SETUP_OFFSET	24

#define SPI_CFG1_CS_IDLE_OFFSET		0
#define SPI_CFG1_PACKET_LOOP_OFFSET	8
#define SPI_CFG1_PACKET_LENGTH_OFFSET	16
#define SPI_CFG1_GET_TICK_DLY_OFFSET	30

#define SPI_CFG1_CS_IDLE_MASK		0xff
#define SPI_CFG1_PACKET_LOOP_MASK	0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK	0x3ff0000

#define SPI_CMD_ACT			BIT(0)
#define SPI_CMD_RESUME			BIT(1)
#define SPI_CMD_RST			BIT(2)
#define SPI_CMD_PAUSE_EN		BIT(4)
#define SPI_CMD_DEASSERT		BIT(5)
#define SPI_CMD_CPHA			BIT(8)
#define SPI_CMD_CPOL			BIT(9)
#define SPI_CMD_RX_DMA			BIT(10)
#define SPI_CMD_TX_DMA			BIT(11)
#define SPI_CMD_TXMSBF			BIT(12)
#define SPI_CMD_RXMSBF			BIT(13)
#define SPI_CMD_RX_ENDIAN		BIT(14)
#define SPI_CMD_TX_ENDIAN		BIT(15)
#define SPI_CMD_FINISH_IE		BIT(16)
#define SPI_CMD_PAUSE_IE		BIT(17)

#define MT8173_SPI_MAX_PAD_SEL		3

#define MTK_SPI_PAUSE_INT_STATUS	0x2

#define MTK_SPI_IDLE			0
#define MTK_SPI_PAUSED			1

#define MTK_SPI_MAX_FIFO_SIZE		32
#define MTK_SPI_PACKET_SIZE		1024

struct mtk_spi_compatible {
	bool need_pad_sel;
	/* Must explicitly send dummy Tx bytes to do Rx only transfer */
	bool must_tx;
};

struct mtk_spi {
	void __iomem *base;
	u32 state;
	int pad_num;
	u32 *pad_sel;
	struct clk *parent_clk, *sel_clk, *spi_clk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;
	struct scatterlist *tx_sgl, *rx_sgl;
	u32 tx_sgl_len, rx_sgl_len;
	const struct mtk_spi_compatible *dev_comp;
};
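/*
 * Per-SoC quirks. MT8173 routes each chip select through a configurable
 * pad group (need_pad_sel) and must transmit in order to receive, so
 * probe sets SPI_MASTER_MUST_TX and lets the SPI core supply dummy Tx
 * buffers for Rx-only transfers.
 */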
static const struct mtk_spi_compatible mt6589_compat;
static const struct mtk_spi_compatible mt8135_compat;
static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

/*
 * Default chip config, used when a device does not supply its own
 * via controller_data.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.rx_mlsb = 1,
	.tx_mlsb = 1,
};

static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,mt6589-spi", .data = (void *)&mt6589_compat },
	{ .compatible = "mediatek,mt8135-spi", .data = (void *)&mt8135_compat },
	{ .compatible = "mediatek,mt8173-spi", .data = (void *)&mt8173_compat },
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

static void mtk_spi_reset(struct mtk_spi *mdata)
{
	u32 reg_val;

	/* set the software reset bit in SPI_CMD_REG. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}

static int mtk_spi_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	u16 cpha, cpol;
	u32 reg_val;
	struct spi_device *spi = msg->spi;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);

	/* set the tx/rx MSB-first flags */
	if (chip_config->tx_mlsb)
		reg_val |= SPI_CMD_TXMSBF;
	else
		reg_val &= ~SPI_CMD_TXMSBF;
	if (chip_config->rx_mlsb)
		reg_val |= SPI_CMD_RXMSBF;
	else
		reg_val &= ~SPI_CMD_RXMSBF;

	/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	/* always enable the finish and pause interrupts */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi->chip_select],
		       mdata->base + SPI_PAD_SEL_REG);

	return 0;
}

static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable) {
		reg_val |= SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
	} else {
		reg_val &= ~SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	}
}
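/*
 * Per-transfer timing. The bus clock is derived from spi_clk by an
 * integer divider: div = DIV_ROUND_UP(spi_clk_hz, speed_hz), or 1 when
 * at least half the source clock rate is requested. The rounded-up half
 * of that divider becomes both the SCK high and SCK low time, and the
 * CS setup, hold and idle times are set to twice that (about one SCK
 * period). Each register field holds (ticks - 1).
 */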
static void mtk_spi_prepare_transfer(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	spi_clk_hz = clk_get_rate(mdata->spi_clk);
	if (xfer->speed_hz < spi_clk_hz / 2)
		div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
	else
		div = 1;

	sck_time = (div + 1) / 2;
	cs_time = sck_time * 2;

	reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET);
	reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
	writel(reg_val, mdata->base + SPI_CFG0_REG);

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_setup_packet(struct spi_master *master)
{
	u32 packet_size, packet_loop, reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
	packet_loop = mdata->xfer_len / packet_size;

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_enable_transfer(struct spi_master *master)
{
	u32 cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (mdata->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, mdata->base + SPI_CMD_REG);
}

static int mtk_spi_get_mult_delta(u32 xfer_len)
{
	u32 mult_delta;

	if (xfer_len > MTK_SPI_PACKET_SIZE)
		mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
	else
		mult_delta = 0;

	return mult_delta;
}

static void mtk_spi_update_mdata_len(struct spi_master *master)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}

static void mtk_spi_setup_dma_addr(struct spi_master *master,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl)
		writel(xfer->tx_dma, mdata->base + SPI_TX_SRC_REG);
	if (mdata->rx_sgl)
		writel(xfer->rx_dma, mdata->base + SPI_RX_DST_REG);
}
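/*
 * PIO path for transfers that fit in the 32-byte FIFO (see
 * mtk_spi_can_dma()). The Tx words are pushed into SPI_TX_DATA_REG up
 * front, the transfer is started, and the positive return value tells
 * the SPI core that completion is signalled later, from the interrupt
 * handler, via spi_finalize_current_transfer().
 */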
static int mtk_spi_fifo_transfer(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cnt;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = xfer->len;
	mtk_spi_prepare_transfer(master, xfer);
	mtk_spi_setup_packet(master);

	if (xfer->len % 4)
		cnt = xfer->len / 4 + 1;
	else
		cnt = xfer->len / 4;
	iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);

	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_dma_transfer(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;

	mtk_spi_prepare_transfer(master, xfer);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, xfer);
	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	if (master->can_dma(master, spi, xfer))
		return mtk_spi_dma_transfer(master, spi, xfer);
	else
		return mtk_spi_fifo_transfer(master, spi, xfer);
}

static bool mtk_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	return xfer->len > MTK_SPI_MAX_FIFO_SIZE;
}

static int mtk_spi_setup(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	if (!spi->controller_data)
		spi->controller_data = (void *)&mtk_default_chip_info;

	if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
		gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));

	return 0;
}
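/*
 * Completion/continuation interrupt. A pause interrupt
 * (MTK_SPI_PAUSE_INT_STATUS) leaves the controller in MTK_SPI_PAUSED, so
 * the next transfer is started with SPI_CMD_RESUME instead of
 * SPI_CMD_ACT. FIFO transfers are drained and finalized here; DMA
 * transfers walk the Tx/Rx scatterlists, reprogramming the DMA addresses
 * and packet length until both lists are exhausted.
 */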
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	u32 cmd, reg_val, cnt;
	struct spi_master *master = dev_id;
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	struct spi_transfer *trans = mdata->cur_transfer;

	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	if (!master->can_dma(master, master->cur_msg->spi, trans)) {
		if (trans->rx_buf) {
			if (mdata->xfer_len % 4)
				cnt = mdata->xfer_len / 4 + 1;
			else
				cnt = mdata->xfer_len / 4;
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     trans->rx_buf, cnt);
		}
		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	if (mdata->tx_sgl)
		trans->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		trans->rx_dma += mdata->xfer_len;

	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			trans->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			trans->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* spi disable dma */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, trans);
	mtk_spi_enable_transfer(master);

	return IRQ_HANDLED;
}
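/*
 * Probe expects three named clocks from the device tree: "parent-clk",
 * "sel-clk" and "spi-clk". "spi-clk" is the functional clock whose rate
 * feeds mtk_spi_prepare_transfer(); "sel-clk" is a mux that is
 * reparented to "parent-clk" once at probe time. On SoCs with
 * need_pad_sel set, the "mediatek,pad-select" property must supply one
 * pad group per chip select.
 */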
static int mtk_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct mtk_spi *mdata;
	const struct of_device_id *of_id;
	struct resource *res;
	int i, irq, ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
	if (!master) {
		dev_err(&pdev->dev, "failed to alloc spi master\n");
		return -ENOMEM;
	}

	master->auto_runtime_pm = true;
	master->dev.of_node = pdev->dev.of_node;
	master->mode_bits = SPI_CPOL | SPI_CPHA;

	master->set_cs = mtk_spi_set_cs;
	master->prepare_message = mtk_spi_prepare_message;
	master->transfer_one = mtk_spi_transfer_one;
	master->can_dma = mtk_spi_can_dma;
	master->setup = mtk_spi_setup;

	of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
	if (!of_id) {
		dev_err(&pdev->dev, "failed to probe of_node\n");
		ret = -EINVAL;
		goto err_put_master;
	}

	mdata = spi_master_get_devdata(master);
	mdata->dev_comp = of_id->data;
	if (mdata->dev_comp->must_tx)
		master->flags = SPI_MASTER_MUST_TX;

	if (mdata->dev_comp->need_pad_sel) {
		mdata->pad_num = of_property_count_u32_elems(
			pdev->dev.of_node,
			"mediatek,pad-select");
		if (mdata->pad_num < 0) {
			dev_err(&pdev->dev,
				"No 'mediatek,pad-select' property\n");
			ret = -EINVAL;
			goto err_put_master;
		}

		mdata->pad_sel = devm_kmalloc_array(&pdev->dev, mdata->pad_num,
						    sizeof(u32), GFP_KERNEL);
		if (!mdata->pad_sel) {
			ret = -ENOMEM;
			goto err_put_master;
		}

		for (i = 0; i < mdata->pad_num; i++) {
			of_property_read_u32_index(pdev->dev.of_node,
						   "mediatek,pad-select",
						   i, &mdata->pad_sel[i]);
			if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL) {
				dev_err(&pdev->dev, "wrong pad-sel[%d]: %u\n",
					i, mdata->pad_sel[i]);
				ret = -EINVAL;
				goto err_put_master;
			}
		}
	}

	platform_set_drvdata(pdev, master);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "failed to determine base address\n");
		goto err_put_master;
	}

	mdata->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdata->base)) {
		ret = PTR_ERR(mdata->base);
		goto err_put_master;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
		ret = irq;
		goto err_put_master;
	}

	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
			       IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
	if (ret) {
		dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
		goto err_put_master;
	}

	mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
	if (IS_ERR(mdata->parent_clk)) {
		ret = PTR_ERR(mdata->parent_clk);
		dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
		goto err_put_master;
	}

	mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
	if (IS_ERR(mdata->sel_clk)) {
		ret = PTR_ERR(mdata->sel_clk);
		dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
		goto err_put_master;
	}

	mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
	if (IS_ERR(mdata->spi_clk)) {
		ret = PTR_ERR(mdata->spi_clk);
		dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
		goto err_put_master;
	}

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
		goto err_put_master;
	}

	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
		goto err_disable_clk;
	}

	clk_disable_unprepare(mdata->spi_clk);

	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
		goto err_put_master;
	}

	if (mdata->dev_comp->need_pad_sel) {
		if (mdata->pad_num != master->num_chipselect) {
			dev_err(&pdev->dev,
				"pad_num does not match num_chipselect(%d != %d)\n",
				mdata->pad_num, master->num_chipselect);
			ret = -EINVAL;
			goto err_put_master;
		}

		if (!master->cs_gpios && master->num_chipselect > 1) {
			dev_err(&pdev->dev,
				"cs_gpios not specified and num_chipselect > 1\n");
			ret = -EINVAL;
			goto err_put_master;
		}

		if (master->cs_gpios) {
			for (i = 0; i < master->num_chipselect; i++) {
				ret = devm_gpio_request(&pdev->dev,
							master->cs_gpios[i],
							dev_name(&pdev->dev));
				if (ret) {
					dev_err(&pdev->dev,
						"can't get CS GPIO %i\n", i);
					goto err_put_master;
				}
			}
		}
	}

	return 0;

err_disable_clk:
	clk_disable_unprepare(mdata->spi_clk);
err_put_master:
	spi_master_put(master);

	return ret;
}

static int mtk_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	mtk_spi_reset(mdata);
	spi_master_put(master);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_suspend(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}

static int mtk_spi_resume(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
	}

	ret = spi_master_resume(master);
	if (ret < 0)
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}
#endif /* CONFIG_PM_SLEEP */
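/*
 * Only spi_clk is gated on idle. Runtime PM disables it whenever the
 * controller is unused, so the system sleep handlers above touch the
 * clock only when the device is not already runtime-suspended.
 */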
#ifdef CONFIG_PM
static int mtk_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	clk_disable_unprepare(mdata->spi_clk);

	return 0;
}

static int mtk_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_driver = {
	.driver = {
		.name = "mtk-spi",
		.pm = &mtk_spi_pm,
		.of_match_table = mtk_spi_of_match,
	},
	.probe = mtk_spi_probe,
	.remove = mtk_spi_remove,
};

module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");