1/* 2 * Blackfin On-Chip SPI Driver 3 * 4 * Copyright 2004-2010 Analog Devices Inc. 5 * 6 * Enter bugs at http://blackfin.uclinux.org/ 7 * 8 * Licensed under the GPL-2 or later. 9 */ 10 11#include <linux/init.h> 12#include <linux/module.h> 13#include <linux/delay.h> 14#include <linux/device.h> 15#include <linux/gpio.h> 16#include <linux/slab.h> 17#include <linux/io.h> 18#include <linux/ioport.h> 19#include <linux/irq.h> 20#include <linux/errno.h> 21#include <linux/interrupt.h> 22#include <linux/platform_device.h> 23#include <linux/dma-mapping.h> 24#include <linux/spi/spi.h> 25#include <linux/workqueue.h> 26 27#include <asm/dma.h> 28#include <asm/portmux.h> 29#include <asm/bfin5xx_spi.h> 30#include <asm/cacheflush.h> 31 32#define DRV_NAME "bfin-spi" 33#define DRV_AUTHOR "Bryan Wu, Luke Yang" 34#define DRV_DESC "Blackfin on-chip SPI Controller Driver" 35#define DRV_VERSION "1.0" 36 37MODULE_AUTHOR(DRV_AUTHOR); 38MODULE_DESCRIPTION(DRV_DESC); 39MODULE_LICENSE("GPL"); 40 41#define START_STATE ((void *)0) 42#define RUNNING_STATE ((void *)1) 43#define DONE_STATE ((void *)2) 44#define ERROR_STATE ((void *)-1) 45 46struct bfin_spi_master_data; 47 48struct bfin_spi_transfer_ops { 49 void (*write) (struct bfin_spi_master_data *); 50 void (*read) (struct bfin_spi_master_data *); 51 void (*duplex) (struct bfin_spi_master_data *); 52}; 53 54struct bfin_spi_master_data { 55 /* Driver model hookup */ 56 struct platform_device *pdev; 57 58 /* SPI framework hookup */ 59 struct spi_master *master; 60 61 /* Regs base of SPI controller */ 62 struct bfin_spi_regs __iomem *regs; 63 64 /* Pin request list */ 65 u16 *pin_req; 66 67 /* BFIN hookup */ 68 struct bfin5xx_spi_master *master_info; 69 70 /* Driver message queue */ 71 struct workqueue_struct *workqueue; 72 struct work_struct pump_messages; 73 spinlock_t lock; 74 struct list_head queue; 75 int busy; 76 bool running; 77 78 /* Message Transfer pump */ 79 struct tasklet_struct pump_transfers; 80 81 /* Current message transfer state info */ 82 struct spi_message *cur_msg; 83 struct spi_transfer *cur_transfer; 84 struct bfin_spi_slave_data *cur_chip; 85 size_t len_in_bytes; 86 size_t len; 87 void *tx; 88 void *tx_end; 89 void *rx; 90 void *rx_end; 91 92 /* DMA stuffs */ 93 int dma_channel; 94 int dma_mapped; 95 int dma_requested; 96 dma_addr_t rx_dma; 97 dma_addr_t tx_dma; 98 99 int irq_requested; 100 int spi_irq; 101 102 size_t rx_map_len; 103 size_t tx_map_len; 104 u8 n_bytes; 105 u16 ctrl_reg; 106 u16 flag_reg; 107 108 int cs_change; 109 const struct bfin_spi_transfer_ops *ops; 110}; 111 112struct bfin_spi_slave_data { 113 u16 ctl_reg; 114 u16 baud; 115 u16 flag; 116 117 u8 chip_select_num; 118 u8 enable_dma; 119 u16 cs_chg_udelay; /* Some devices require > 255usec delay */ 120 u32 cs_gpio; 121 u16 idle_tx_val; 122 u8 pio_interrupt; /* use spi data irq */ 123 const struct bfin_spi_transfer_ops *ops; 124}; 125 126static void bfin_spi_enable(struct bfin_spi_master_data *drv_data) 127{ 128 bfin_write_or(&drv_data->regs->ctl, BIT_CTL_ENABLE); 129} 130 131static void bfin_spi_disable(struct bfin_spi_master_data *drv_data) 132{ 133 bfin_write_and(&drv_data->regs->ctl, ~BIT_CTL_ENABLE); 134} 135 136/* Caculate the SPI_BAUD register value based on input HZ */ 137static u16 hz_to_spi_baud(u32 speed_hz) 138{ 139 u_long sclk = get_sclk(); 140 u16 spi_baud = (sclk / (2 * speed_hz)); 141 142 if ((sclk % (2 * speed_hz)) > 0) 143 spi_baud++; 144 145 if (spi_baud < MIN_SPI_BAUD_VAL) 146 spi_baud = MIN_SPI_BAUD_VAL; 147 148 return spi_baud; 149} 150 151static int bfin_spi_flush(struct bfin_spi_master_data *drv_data) 152{ 153 unsigned long limit = loops_per_jiffy << 1; 154 155 /* wait for stop and clear stat */ 156 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF) && --limit) 157 cpu_relax(); 158 159 bfin_write(&drv_data->regs->stat, BIT_STAT_CLR); 160 161 return limit; 162} 163 164/* Chip select operation functions for cs_change flag */ 165static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) 166{ 167 if (likely(chip->chip_select_num < MAX_CTRL_CS)) 168 bfin_write_and(&drv_data->regs->flg, ~chip->flag); 169 else 170 gpio_set_value(chip->cs_gpio, 0); 171} 172 173static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data, 174 struct bfin_spi_slave_data *chip) 175{ 176 if (likely(chip->chip_select_num < MAX_CTRL_CS)) 177 bfin_write_or(&drv_data->regs->flg, chip->flag); 178 else 179 gpio_set_value(chip->cs_gpio, 1); 180 181 /* Move delay here for consistency */ 182 if (chip->cs_chg_udelay) 183 udelay(chip->cs_chg_udelay); 184} 185 186/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */ 187static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data, 188 struct bfin_spi_slave_data *chip) 189{ 190 if (chip->chip_select_num < MAX_CTRL_CS) 191 bfin_write_or(&drv_data->regs->flg, chip->flag >> 8); 192} 193 194static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data, 195 struct bfin_spi_slave_data *chip) 196{ 197 if (chip->chip_select_num < MAX_CTRL_CS) 198 bfin_write_and(&drv_data->regs->flg, ~(chip->flag >> 8)); 199} 200 201/* stop controller and re-config current chip*/ 202static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data) 203{ 204 struct bfin_spi_slave_data *chip = drv_data->cur_chip; 205 206 /* Clear status and disable clock */ 207 bfin_write(&drv_data->regs->stat, BIT_STAT_CLR); 208 bfin_spi_disable(drv_data); 209 dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n"); 210 211 SSYNC(); 212 213 /* Load the registers */ 214 bfin_write(&drv_data->regs->ctl, chip->ctl_reg); 215 bfin_write(&drv_data->regs->baud, chip->baud); 216 217 bfin_spi_enable(drv_data); 218 bfin_spi_cs_active(drv_data, chip); 219} 220 221/* used to kick off transfer in rx mode and read unwanted RX data */ 222static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data) 223{ 224 (void) bfin_read(&drv_data->regs->rdbr); 225} 226 227static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data) 228{ 229 /* clear RXS (we check for RXS inside the loop) */ 230 bfin_spi_dummy_read(drv_data); 231 232 while (drv_data->tx < drv_data->tx_end) { 233 bfin_write(&drv_data->regs->tdbr, (*(u8 *) (drv_data->tx++))); 234 /* wait until transfer finished. 235 checking SPIF or TXS may not guarantee transfer completion */ 236 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) 237 cpu_relax(); 238 /* discard RX data and clear RXS */ 239 bfin_spi_dummy_read(drv_data); 240 } 241} 242 243static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data) 244{ 245 u16 tx_val = drv_data->cur_chip->idle_tx_val; 246 247 /* discard old RX data and clear RXS */ 248 bfin_spi_dummy_read(drv_data); 249 250 while (drv_data->rx < drv_data->rx_end) { 251 bfin_write(&drv_data->regs->tdbr, tx_val); 252 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) 253 cpu_relax(); 254 *(u8 *) (drv_data->rx++) = bfin_read(&drv_data->regs->rdbr); 255 } 256} 257 258static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data) 259{ 260 /* discard old RX data and clear RXS */ 261 bfin_spi_dummy_read(drv_data); 262 263 while (drv_data->rx < drv_data->rx_end) { 264 bfin_write(&drv_data->regs->tdbr, (*(u8 *) (drv_data->tx++))); 265 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) 266 cpu_relax(); 267 *(u8 *) (drv_data->rx++) = bfin_read(&drv_data->regs->rdbr); 268 } 269} 270 271static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = { 272 .write = bfin_spi_u8_writer, 273 .read = bfin_spi_u8_reader, 274 .duplex = bfin_spi_u8_duplex, 275}; 276 277static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data) 278{ 279 /* clear RXS (we check for RXS inside the loop) */ 280 bfin_spi_dummy_read(drv_data); 281 282 while (drv_data->tx < drv_data->tx_end) { 283 bfin_write(&drv_data->regs->tdbr, (*(u16 *) (drv_data->tx))); 284 drv_data->tx += 2; 285 /* wait until transfer finished. 286 checking SPIF or TXS may not guarantee transfer completion */ 287 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) 288 cpu_relax(); 289 /* discard RX data and clear RXS */ 290 bfin_spi_dummy_read(drv_data); 291 } 292} 293 294static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data) 295{ 296 u16 tx_val = drv_data->cur_chip->idle_tx_val; 297 298 /* discard old RX data and clear RXS */ 299 bfin_spi_dummy_read(drv_data); 300 301 while (drv_data->rx < drv_data->rx_end) { 302 bfin_write(&drv_data->regs->tdbr, tx_val); 303 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) 304 cpu_relax(); 305 *(u16 *) (drv_data->rx) = bfin_read(&drv_data->regs->rdbr); 306 drv_data->rx += 2; 307 } 308} 309 310static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data) 311{ 312 /* discard old RX data and clear RXS */ 313 bfin_spi_dummy_read(drv_data); 314 315 while (drv_data->rx < drv_data->rx_end) { 316 bfin_write(&drv_data->regs->tdbr, (*(u16 *) (drv_data->tx))); 317 drv_data->tx += 2; 318 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) 319 cpu_relax(); 320 *(u16 *) (drv_data->rx) = bfin_read(&drv_data->regs->rdbr); 321 drv_data->rx += 2; 322 } 323} 324 325static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = { 326 .write = bfin_spi_u16_writer, 327 .read = bfin_spi_u16_reader, 328 .duplex = bfin_spi_u16_duplex, 329}; 330 331/* test if there is more transfer to be done */ 332static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data) 333{ 334 struct spi_message *msg = drv_data->cur_msg; 335 struct spi_transfer *trans = drv_data->cur_transfer; 336 337 /* Move to next transfer */ 338 if (trans->transfer_list.next != &msg->transfers) { 339 drv_data->cur_transfer = 340 list_entry(trans->transfer_list.next, 341 struct spi_transfer, transfer_list); 342 return RUNNING_STATE; 343 } else 344 return DONE_STATE; 345} 346 347/* 348 * caller already set message->status; 349 * dma and pio irqs are blocked give finished message back 350 */ 351static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data) 352{ 353 struct bfin_spi_slave_data *chip = drv_data->cur_chip; 354 unsigned long flags; 355 struct spi_message *msg; 356 357 spin_lock_irqsave(&drv_data->lock, flags); 358 msg = drv_data->cur_msg; 359 drv_data->cur_msg = NULL; 360 drv_data->cur_transfer = NULL; 361 drv_data->cur_chip = NULL; 362 queue_work(drv_data->workqueue, &drv_data->pump_messages); 363 spin_unlock_irqrestore(&drv_data->lock, flags); 364 365 msg->state = NULL; 366 367 if (!drv_data->cs_change) 368 bfin_spi_cs_deactive(drv_data, chip); 369 370 /* Not stop spi in autobuffer mode */ 371 if (drv_data->tx_dma != 0xFFFF) 372 bfin_spi_disable(drv_data); 373 374 if (msg->complete) 375 msg->complete(msg->context); 376} 377 378/* spi data irq handler */ 379static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) 380{ 381 struct bfin_spi_master_data *drv_data = dev_id; 382 struct bfin_spi_slave_data *chip = drv_data->cur_chip; 383 struct spi_message *msg = drv_data->cur_msg; 384 int n_bytes = drv_data->n_bytes; 385 int loop = 0; 386 387 /* wait until transfer finished. */ 388 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) 389 cpu_relax(); 390 391 if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) || 392 (drv_data->rx && drv_data->rx >= (drv_data->rx_end - n_bytes))) { 393 /* last read */ 394 if (drv_data->rx) { 395 dev_dbg(&drv_data->pdev->dev, "last read\n"); 396 if (!(n_bytes % 2)) { 397 u16 *buf = (u16 *)drv_data->rx; 398 for (loop = 0; loop < n_bytes / 2; loop++) 399 *buf++ = bfin_read(&drv_data->regs->rdbr); 400 } else { 401 u8 *buf = (u8 *)drv_data->rx; 402 for (loop = 0; loop < n_bytes; loop++) 403 *buf++ = bfin_read(&drv_data->regs->rdbr); 404 } 405 drv_data->rx += n_bytes; 406 } 407 408 msg->actual_length += drv_data->len_in_bytes; 409 if (drv_data->cs_change) 410 bfin_spi_cs_deactive(drv_data, chip); 411 /* Move to next transfer */ 412 msg->state = bfin_spi_next_transfer(drv_data); 413 414 disable_irq_nosync(drv_data->spi_irq); 415 416 /* Schedule transfer tasklet */ 417 tasklet_schedule(&drv_data->pump_transfers); 418 return IRQ_HANDLED; 419 } 420 421 if (drv_data->rx && drv_data->tx) { 422 /* duplex */ 423 dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n"); 424 if (!(n_bytes % 2)) { 425 u16 *buf = (u16 *)drv_data->rx; 426 u16 *buf2 = (u16 *)drv_data->tx; 427 for (loop = 0; loop < n_bytes / 2; loop++) { 428 *buf++ = bfin_read(&drv_data->regs->rdbr); 429 bfin_write(&drv_data->regs->tdbr, *buf2++); 430 } 431 } else { 432 u8 *buf = (u8 *)drv_data->rx; 433 u8 *buf2 = (u8 *)drv_data->tx; 434 for (loop = 0; loop < n_bytes; loop++) { 435 *buf++ = bfin_read(&drv_data->regs->rdbr); 436 bfin_write(&drv_data->regs->tdbr, *buf2++); 437 } 438 } 439 } else if (drv_data->rx) { 440 /* read */ 441 dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n"); 442 if (!(n_bytes % 2)) { 443 u16 *buf = (u16 *)drv_data->rx; 444 for (loop = 0; loop < n_bytes / 2; loop++) { 445 *buf++ = bfin_read(&drv_data->regs->rdbr); 446 bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val); 447 } 448 } else { 449 u8 *buf = (u8 *)drv_data->rx; 450 for (loop = 0; loop < n_bytes; loop++) { 451 *buf++ = bfin_read(&drv_data->regs->rdbr); 452 bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val); 453 } 454 } 455 } else if (drv_data->tx) { 456 /* write */ 457 dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n"); 458 if (!(n_bytes % 2)) { 459 u16 *buf = (u16 *)drv_data->tx; 460 for (loop = 0; loop < n_bytes / 2; loop++) { 461 bfin_read(&drv_data->regs->rdbr); 462 bfin_write(&drv_data->regs->tdbr, *buf++); 463 } 464 } else { 465 u8 *buf = (u8 *)drv_data->tx; 466 for (loop = 0; loop < n_bytes; loop++) { 467 bfin_read(&drv_data->regs->rdbr); 468 bfin_write(&drv_data->regs->tdbr, *buf++); 469 } 470 } 471 } 472 473 if (drv_data->tx) 474 drv_data->tx += n_bytes; 475 if (drv_data->rx) 476 drv_data->rx += n_bytes; 477 478 return IRQ_HANDLED; 479} 480 481static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) 482{ 483 struct bfin_spi_master_data *drv_data = dev_id; 484 struct bfin_spi_slave_data *chip = drv_data->cur_chip; 485 struct spi_message *msg = drv_data->cur_msg; 486 unsigned long timeout; 487 unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel); 488 u16 spistat = bfin_read(&drv_data->regs->stat); 489 490 dev_dbg(&drv_data->pdev->dev, 491 "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", 492 dmastat, spistat); 493 494 if (drv_data->rx != NULL) { 495 u16 cr = bfin_read(&drv_data->regs->ctl); 496 /* discard old RX data and clear RXS */ 497 bfin_spi_dummy_read(drv_data); 498 bfin_write(&drv_data->regs->ctl, cr & ~BIT_CTL_ENABLE); /* Disable SPI */ 499 bfin_write(&drv_data->regs->ctl, cr & ~BIT_CTL_TIMOD); /* Restore State */ 500 bfin_write(&drv_data->regs->stat, BIT_STAT_CLR); /* Clear Status */ 501 } 502 503 clear_dma_irqstat(drv_data->dma_channel); 504 505 /* 506 * wait for the last transaction shifted out. HRM states: 507 * at this point there may still be data in the SPI DMA FIFO waiting 508 * to be transmitted ... software needs to poll TXS in the SPI_STAT 509 * register until it goes low for 2 successive reads 510 */ 511 if (drv_data->tx != NULL) { 512 while ((bfin_read(&drv_data->regs->stat) & BIT_STAT_TXS) || 513 (bfin_read(&drv_data->regs->stat) & BIT_STAT_TXS)) 514 cpu_relax(); 515 } 516 517 dev_dbg(&drv_data->pdev->dev, 518 "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", 519 dmastat, bfin_read(&drv_data->regs->stat)); 520 521 timeout = jiffies + HZ; 522 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF)) 523 if (!time_before(jiffies, timeout)) { 524 dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF\n"); 525 break; 526 } else 527 cpu_relax(); 528 529 if ((dmastat & DMA_ERR) && (spistat & BIT_STAT_RBSY)) { 530 msg->state = ERROR_STATE; 531 dev_err(&drv_data->pdev->dev, "dma receive: fifo/buffer overflow\n"); 532 } else { 533 msg->actual_length += drv_data->len_in_bytes; 534 535 if (drv_data->cs_change) 536 bfin_spi_cs_deactive(drv_data, chip); 537 538 /* Move to next transfer */ 539 msg->state = bfin_spi_next_transfer(drv_data); 540 } 541 542 /* Schedule transfer tasklet */ 543 tasklet_schedule(&drv_data->pump_transfers); 544 545 /* free the irq handler before next transfer */ 546 dev_dbg(&drv_data->pdev->dev, 547 "disable dma channel irq%d\n", 548 drv_data->dma_channel); 549 dma_disable_irq_nosync(drv_data->dma_channel); 550 551 return IRQ_HANDLED; 552} 553 554static void bfin_spi_pump_transfers(unsigned long data) 555{ 556 struct bfin_spi_master_data *drv_data = (struct bfin_spi_master_data *)data; 557 struct spi_message *message = NULL; 558 struct spi_transfer *transfer = NULL; 559 struct spi_transfer *previous = NULL; 560 struct bfin_spi_slave_data *chip = NULL; 561 unsigned int bits_per_word; 562 u16 cr, cr_width = 0, dma_width, dma_config; 563 u32 tranf_success = 1; 564 u8 full_duplex = 0; 565 566 /* Get current state information */ 567 message = drv_data->cur_msg; 568 transfer = drv_data->cur_transfer; 569 chip = drv_data->cur_chip; 570 571 /* 572 * if msg is error or done, report it back using complete() callback 573 */ 574 575 /* Handle for abort */ 576 if (message->state == ERROR_STATE) { 577 dev_dbg(&drv_data->pdev->dev, "transfer: we've hit an error\n"); 578 message->status = -EIO; 579 bfin_spi_giveback(drv_data); 580 return; 581 } 582 583 /* Handle end of message */ 584 if (message->state == DONE_STATE) { 585 dev_dbg(&drv_data->pdev->dev, "transfer: all done!\n"); 586 message->status = 0; 587 bfin_spi_flush(drv_data); 588 bfin_spi_giveback(drv_data); 589 return; 590 } 591 592 /* Delay if requested at end of transfer */ 593 if (message->state == RUNNING_STATE) { 594 dev_dbg(&drv_data->pdev->dev, "transfer: still running ...\n"); 595 previous = list_entry(transfer->transfer_list.prev, 596 struct spi_transfer, transfer_list); 597 if (previous->delay_usecs) 598 udelay(previous->delay_usecs); 599 } 600 601 /* Flush any existing transfers that may be sitting in the hardware */ 602 if (bfin_spi_flush(drv_data) == 0) { 603 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); 604 message->status = -EIO; 605 bfin_spi_giveback(drv_data); 606 return; 607 } 608 609 if (transfer->len == 0) { 610 /* Move to next transfer of this msg */ 611 message->state = bfin_spi_next_transfer(drv_data); 612 /* Schedule next transfer tasklet */ 613 tasklet_schedule(&drv_data->pump_transfers); 614 return; 615 } 616 617 if (transfer->tx_buf != NULL) { 618 drv_data->tx = (void *)transfer->tx_buf; 619 drv_data->tx_end = drv_data->tx + transfer->len; 620 dev_dbg(&drv_data->pdev->dev, "tx_buf is %p, tx_end is %p\n", 621 transfer->tx_buf, drv_data->tx_end); 622 } else { 623 drv_data->tx = NULL; 624 } 625 626 if (transfer->rx_buf != NULL) { 627 full_duplex = transfer->tx_buf != NULL; 628 drv_data->rx = transfer->rx_buf; 629 drv_data->rx_end = drv_data->rx + transfer->len; 630 dev_dbg(&drv_data->pdev->dev, "rx_buf is %p, rx_end is %p\n", 631 transfer->rx_buf, drv_data->rx_end); 632 } else { 633 drv_data->rx = NULL; 634 } 635 636 drv_data->rx_dma = transfer->rx_dma; 637 drv_data->tx_dma = transfer->tx_dma; 638 drv_data->len_in_bytes = transfer->len; 639 drv_data->cs_change = transfer->cs_change; 640 641 /* Bits per word setup */ 642 bits_per_word = transfer->bits_per_word; 643 if (bits_per_word == 16) { 644 drv_data->n_bytes = bits_per_word/8; 645 drv_data->len = (transfer->len) >> 1; 646 cr_width = BIT_CTL_WORDSIZE; 647 drv_data->ops = &bfin_bfin_spi_transfer_ops_u16; 648 } else if (bits_per_word == 8) { 649 drv_data->n_bytes = bits_per_word/8; 650 drv_data->len = transfer->len; 651 drv_data->ops = &bfin_bfin_spi_transfer_ops_u8; 652 } 653 cr = bfin_read(&drv_data->regs->ctl) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); 654 cr |= cr_width; 655 bfin_write(&drv_data->regs->ctl, cr); 656 657 dev_dbg(&drv_data->pdev->dev, 658 "transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n", 659 drv_data->ops, chip->ops, &bfin_bfin_spi_transfer_ops_u8); 660 661 message->state = RUNNING_STATE; 662 dma_config = 0; 663 664 /* Speed setup (surely valid because already checked) */ 665 if (transfer->speed_hz) 666 bfin_write(&drv_data->regs->baud, hz_to_spi_baud(transfer->speed_hz)); 667 else 668 bfin_write(&drv_data->regs->baud, chip->baud); 669 670 bfin_write(&drv_data->regs->stat, BIT_STAT_CLR); 671 bfin_spi_cs_active(drv_data, chip); 672 673 dev_dbg(&drv_data->pdev->dev, 674 "now pumping a transfer: width is %d, len is %d\n", 675 cr_width, transfer->len); 676 677 /* 678 * Try to map dma buffer and do a dma transfer. If successful use, 679 * different way to r/w according to the enable_dma settings and if 680 * we are not doing a full duplex transfer (since the hardware does 681 * not support full duplex DMA transfers). 682 */ 683 if (!full_duplex && drv_data->cur_chip->enable_dma 684 && drv_data->len > 6) { 685 686 unsigned long dma_start_addr, flags; 687 688 disable_dma(drv_data->dma_channel); 689 clear_dma_irqstat(drv_data->dma_channel); 690 691 /* config dma channel */ 692 dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n"); 693 set_dma_x_count(drv_data->dma_channel, drv_data->len); 694 if (cr_width == BIT_CTL_WORDSIZE) { 695 set_dma_x_modify(drv_data->dma_channel, 2); 696 dma_width = WDSIZE_16; 697 } else { 698 set_dma_x_modify(drv_data->dma_channel, 1); 699 dma_width = WDSIZE_8; 700 } 701 702 /* poll for SPI completion before start */ 703 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF)) 704 cpu_relax(); 705 706 /* dirty hack for autobuffer DMA mode */ 707 if (drv_data->tx_dma == 0xFFFF) { 708 dev_dbg(&drv_data->pdev->dev, 709 "doing autobuffer DMA out.\n"); 710 711 /* no irq in autobuffer mode */ 712 dma_config = 713 (DMAFLOW_AUTO | RESTART | dma_width | DI_EN); 714 set_dma_config(drv_data->dma_channel, dma_config); 715 set_dma_start_addr(drv_data->dma_channel, 716 (unsigned long)drv_data->tx); 717 enable_dma(drv_data->dma_channel); 718 719 /* start SPI transfer */ 720 bfin_write(&drv_data->regs->ctl, cr | BIT_CTL_TIMOD_DMA_TX); 721 722 /* just return here, there can only be one transfer 723 * in this mode 724 */ 725 message->status = 0; 726 bfin_spi_giveback(drv_data); 727 return; 728 } 729 730 /* In dma mode, rx or tx must be NULL in one transfer */ 731 dma_config = (RESTART | dma_width | DI_EN); 732 if (drv_data->rx != NULL) { 733 /* set transfer mode, and enable SPI */ 734 dev_dbg(&drv_data->pdev->dev, "doing DMA in to %p (size %zx)\n", 735 drv_data->rx, drv_data->len_in_bytes); 736 737 /* invalidate caches, if needed */ 738 if (bfin_addr_dcacheable((unsigned long) drv_data->rx)) 739 invalidate_dcache_range((unsigned long) drv_data->rx, 740 (unsigned long) (drv_data->rx + 741 drv_data->len_in_bytes)); 742 743 dma_config |= WNR; 744 dma_start_addr = (unsigned long)drv_data->rx; 745 cr |= BIT_CTL_TIMOD_DMA_RX | BIT_CTL_SENDOPT; 746 747 } else if (drv_data->tx != NULL) { 748 dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n"); 749 750 /* flush caches, if needed */ 751 if (bfin_addr_dcacheable((unsigned long) drv_data->tx)) 752 flush_dcache_range((unsigned long) drv_data->tx, 753 (unsigned long) (drv_data->tx + 754 drv_data->len_in_bytes)); 755 756 dma_start_addr = (unsigned long)drv_data->tx; 757 cr |= BIT_CTL_TIMOD_DMA_TX; 758 759 } else 760 BUG(); 761 762 /* oh man, here there be monsters ... and i dont mean the 763 * fluffy cute ones from pixar, i mean the kind that'll eat 764 * your data, kick your dog, and love it all. do *not* try 765 * and change these lines unless you (1) heavily test DMA 766 * with SPI flashes on a loaded system (e.g. ping floods), 767 * (2) know just how broken the DMA engine interaction with 768 * the SPI peripheral is, and (3) have someone else to blame 769 * when you screw it all up anyways. 770 */ 771 set_dma_start_addr(drv_data->dma_channel, dma_start_addr); 772 set_dma_config(drv_data->dma_channel, dma_config); 773 local_irq_save(flags); 774 SSYNC(); 775 bfin_write(&drv_data->regs->ctl, cr); 776 enable_dma(drv_data->dma_channel); 777 dma_enable_irq(drv_data->dma_channel); 778 local_irq_restore(flags); 779 780 return; 781 } 782 783 /* 784 * We always use SPI_WRITE mode (transfer starts with TDBR write). 785 * SPI_READ mode (transfer starts with RDBR read) seems to have 786 * problems with setting up the output value in TDBR prior to the 787 * start of the transfer. 788 */ 789 bfin_write(&drv_data->regs->ctl, cr | BIT_CTL_TXMOD); 790 791 if (chip->pio_interrupt) { 792 /* SPI irq should have been disabled by now */ 793 794 /* discard old RX data and clear RXS */ 795 bfin_spi_dummy_read(drv_data); 796 797 /* start transfer */ 798 if (drv_data->tx == NULL) 799 bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val); 800 else { 801 int loop; 802 if (bits_per_word == 16) { 803 u16 *buf = (u16 *)drv_data->tx; 804 for (loop = 0; loop < bits_per_word / 16; 805 loop++) { 806 bfin_write(&drv_data->regs->tdbr, *buf++); 807 } 808 } else if (bits_per_word == 8) { 809 u8 *buf = (u8 *)drv_data->tx; 810 for (loop = 0; loop < bits_per_word / 8; loop++) 811 bfin_write(&drv_data->regs->tdbr, *buf++); 812 } 813 814 drv_data->tx += drv_data->n_bytes; 815 } 816 817 /* once TDBR is empty, interrupt is triggered */ 818 enable_irq(drv_data->spi_irq); 819 return; 820 } 821 822 /* IO mode */ 823 dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); 824 825 if (full_duplex) { 826 /* full duplex mode */ 827 BUG_ON((drv_data->tx_end - drv_data->tx) != 828 (drv_data->rx_end - drv_data->rx)); 829 dev_dbg(&drv_data->pdev->dev, 830 "IO duplex: cr is 0x%x\n", cr); 831 832 drv_data->ops->duplex(drv_data); 833 834 if (drv_data->tx != drv_data->tx_end) 835 tranf_success = 0; 836 } else if (drv_data->tx != NULL) { 837 /* write only half duplex */ 838 dev_dbg(&drv_data->pdev->dev, 839 "IO write: cr is 0x%x\n", cr); 840 841 drv_data->ops->write(drv_data); 842 843 if (drv_data->tx != drv_data->tx_end) 844 tranf_success = 0; 845 } else if (drv_data->rx != NULL) { 846 /* read only half duplex */ 847 dev_dbg(&drv_data->pdev->dev, 848 "IO read: cr is 0x%x\n", cr); 849 850 drv_data->ops->read(drv_data); 851 if (drv_data->rx != drv_data->rx_end) 852 tranf_success = 0; 853 } 854 855 if (!tranf_success) { 856 dev_dbg(&drv_data->pdev->dev, 857 "IO write error!\n"); 858 message->state = ERROR_STATE; 859 } else { 860 /* Update total byte transferred */ 861 message->actual_length += drv_data->len_in_bytes; 862 /* Move to next transfer of this msg */ 863 message->state = bfin_spi_next_transfer(drv_data); 864 if (drv_data->cs_change && message->state != DONE_STATE) { 865 bfin_spi_flush(drv_data); 866 bfin_spi_cs_deactive(drv_data, chip); 867 } 868 } 869 870 /* Schedule next transfer tasklet */ 871 tasklet_schedule(&drv_data->pump_transfers); 872} 873 874/* pop a msg from queue and kick off real transfer */ 875static void bfin_spi_pump_messages(struct work_struct *work) 876{ 877 struct bfin_spi_master_data *drv_data; 878 unsigned long flags; 879 880 drv_data = container_of(work, struct bfin_spi_master_data, pump_messages); 881 882 /* Lock queue and check for queue work */ 883 spin_lock_irqsave(&drv_data->lock, flags); 884 if (list_empty(&drv_data->queue) || !drv_data->running) { 885 /* pumper kicked off but no work to do */ 886 drv_data->busy = 0; 887 spin_unlock_irqrestore(&drv_data->lock, flags); 888 return; 889 } 890 891 /* Make sure we are not already running a message */ 892 if (drv_data->cur_msg) { 893 spin_unlock_irqrestore(&drv_data->lock, flags); 894 return; 895 } 896 897 /* Extract head of queue */ 898 drv_data->cur_msg = list_entry(drv_data->queue.next, 899 struct spi_message, queue); 900 901 /* Setup the SSP using the per chip configuration */ 902 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); 903 bfin_spi_restore_state(drv_data); 904 905 list_del_init(&drv_data->cur_msg->queue); 906 907 /* Initial message state */ 908 drv_data->cur_msg->state = START_STATE; 909 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, 910 struct spi_transfer, transfer_list); 911 912 dev_dbg(&drv_data->pdev->dev, 913 "got a message to pump, state is set to: baud " 914 "%d, flag 0x%x, ctl 0x%x\n", 915 drv_data->cur_chip->baud, drv_data->cur_chip->flag, 916 drv_data->cur_chip->ctl_reg); 917 918 dev_dbg(&drv_data->pdev->dev, 919 "the first transfer len is %d\n", 920 drv_data->cur_transfer->len); 921 922 /* Mark as busy and launch transfers */ 923 tasklet_schedule(&drv_data->pump_transfers); 924 925 drv_data->busy = 1; 926 spin_unlock_irqrestore(&drv_data->lock, flags); 927} 928 929/* 930 * got a msg to transfer, queue it in drv_data->queue. 931 * And kick off message pumper 932 */ 933static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) 934{ 935 struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); 936 unsigned long flags; 937 938 spin_lock_irqsave(&drv_data->lock, flags); 939 940 if (!drv_data->running) { 941 spin_unlock_irqrestore(&drv_data->lock, flags); 942 return -ESHUTDOWN; 943 } 944 945 msg->actual_length = 0; 946 msg->status = -EINPROGRESS; 947 msg->state = START_STATE; 948 949 dev_dbg(&spi->dev, "adding an msg in transfer() \n"); 950 list_add_tail(&msg->queue, &drv_data->queue); 951 952 if (drv_data->running && !drv_data->busy) 953 queue_work(drv_data->workqueue, &drv_data->pump_messages); 954 955 spin_unlock_irqrestore(&drv_data->lock, flags); 956 957 return 0; 958} 959 960#define MAX_SPI_SSEL 7 961 962static const u16 ssel[][MAX_SPI_SSEL] = { 963 {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3, 964 P_SPI0_SSEL4, P_SPI0_SSEL5, 965 P_SPI0_SSEL6, P_SPI0_SSEL7}, 966 967 {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3, 968 P_SPI1_SSEL4, P_SPI1_SSEL5, 969 P_SPI1_SSEL6, P_SPI1_SSEL7}, 970 971 {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3, 972 P_SPI2_SSEL4, P_SPI2_SSEL5, 973 P_SPI2_SSEL6, P_SPI2_SSEL7}, 974}; 975 976/* setup for devices (may be called multiple times -- not just first setup) */ 977static int bfin_spi_setup(struct spi_device *spi) 978{ 979 struct bfin5xx_spi_chip *chip_info; 980 struct bfin_spi_slave_data *chip = NULL; 981 struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); 982 u16 bfin_ctl_reg; 983 int ret = -EINVAL; 984 985 /* Only alloc (or use chip_info) on first setup */ 986 chip_info = NULL; 987 chip = spi_get_ctldata(spi); 988 if (chip == NULL) { 989 chip = kzalloc(sizeof(*chip), GFP_KERNEL); 990 if (!chip) { 991 dev_err(&spi->dev, "cannot allocate chip data\n"); 992 ret = -ENOMEM; 993 goto error; 994 } 995 996 chip->enable_dma = 0; 997 chip_info = spi->controller_data; 998 } 999 1000 /* Let people set non-standard bits directly */ 1001 bfin_ctl_reg = BIT_CTL_OPENDRAIN | BIT_CTL_EMISO | 1002 BIT_CTL_PSSE | BIT_CTL_GM | BIT_CTL_SZ; 1003 1004 /* chip_info isn't always needed */ 1005 if (chip_info) { 1006 /* Make sure people stop trying to set fields via ctl_reg 1007 * when they should actually be using common SPI framework. 1008 * Currently we let through: WOM EMISO PSSE GM SZ. 1009 * Not sure if a user actually needs/uses any of these, 1010 * but let's assume (for now) they do. 1011 */ 1012 if (chip_info->ctl_reg & ~bfin_ctl_reg) { 1013 dev_err(&spi->dev, 1014 "do not set bits in ctl_reg that the SPI framework manages\n"); 1015 goto error; 1016 } 1017 chip->enable_dma = chip_info->enable_dma != 0 1018 && drv_data->master_info->enable_dma; 1019 chip->ctl_reg = chip_info->ctl_reg; 1020 chip->cs_chg_udelay = chip_info->cs_chg_udelay; 1021 chip->idle_tx_val = chip_info->idle_tx_val; 1022 chip->pio_interrupt = chip_info->pio_interrupt; 1023 } else { 1024 /* force a default base state */ 1025 chip->ctl_reg &= bfin_ctl_reg; 1026 } 1027 1028 /* translate common spi framework into our register */ 1029 if (spi->mode & SPI_CPOL) 1030 chip->ctl_reg |= BIT_CTL_CPOL; 1031 if (spi->mode & SPI_CPHA) 1032 chip->ctl_reg |= BIT_CTL_CPHA; 1033 if (spi->mode & SPI_LSB_FIRST) 1034 chip->ctl_reg |= BIT_CTL_LSBF; 1035 /* we dont support running in slave mode (yet?) */ 1036 chip->ctl_reg |= BIT_CTL_MASTER; 1037 1038 /* 1039 * Notice: for blackfin, the speed_hz is the value of register 1040 * SPI_BAUD, not the real baudrate 1041 */ 1042 chip->baud = hz_to_spi_baud(spi->max_speed_hz); 1043 chip->chip_select_num = spi->chip_select; 1044 if (chip->chip_select_num < MAX_CTRL_CS) { 1045 if (!(spi->mode & SPI_CPHA)) 1046 dev_warn(&spi->dev, 1047 "Warning: SPI CPHA not set: Slave Select not under software control!\n" 1048 "See Documentation/blackfin/bfin-spi-notes.txt\n"); 1049 1050 chip->flag = (1 << spi->chip_select) << 8; 1051 } else 1052 chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS; 1053 1054 if (chip->enable_dma && chip->pio_interrupt) { 1055 dev_err(&spi->dev, 1056 "enable_dma is set, do not set pio_interrupt\n"); 1057 goto error; 1058 } 1059 /* 1060 * if any one SPI chip is registered and wants DMA, request the 1061 * DMA channel for it 1062 */ 1063 if (chip->enable_dma && !drv_data->dma_requested) { 1064 /* register dma irq handler */ 1065 ret = request_dma(drv_data->dma_channel, "BFIN_SPI_DMA"); 1066 if (ret) { 1067 dev_err(&spi->dev, 1068 "Unable to request BlackFin SPI DMA channel\n"); 1069 goto error; 1070 } 1071 drv_data->dma_requested = 1; 1072 1073 ret = set_dma_callback(drv_data->dma_channel, 1074 bfin_spi_dma_irq_handler, drv_data); 1075 if (ret) { 1076 dev_err(&spi->dev, "Unable to set dma callback\n"); 1077 goto error; 1078 } 1079 dma_disable_irq(drv_data->dma_channel); 1080 } 1081 1082 if (chip->pio_interrupt && !drv_data->irq_requested) { 1083 ret = request_irq(drv_data->spi_irq, bfin_spi_pio_irq_handler, 1084 0, "BFIN_SPI", drv_data); 1085 if (ret) { 1086 dev_err(&spi->dev, "Unable to register spi IRQ\n"); 1087 goto error; 1088 } 1089 drv_data->irq_requested = 1; 1090 /* we use write mode, spi irq has to be disabled here */ 1091 disable_irq(drv_data->spi_irq); 1092 } 1093 1094 if (chip->chip_select_num >= MAX_CTRL_CS) { 1095 /* Only request on first setup */ 1096 if (spi_get_ctldata(spi) == NULL) { 1097 ret = gpio_request(chip->cs_gpio, spi->modalias); 1098 if (ret) { 1099 dev_err(&spi->dev, "gpio_request() error\n"); 1100 goto pin_error; 1101 } 1102 gpio_direction_output(chip->cs_gpio, 1); 1103 } 1104 } 1105 1106 dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n", 1107 spi->modalias, spi->bits_per_word, chip->enable_dma); 1108 dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n", 1109 chip->ctl_reg, chip->flag); 1110 1111 spi_set_ctldata(spi, chip); 1112 1113 dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num); 1114 if (chip->chip_select_num < MAX_CTRL_CS) { 1115 ret = peripheral_request(ssel[spi->master->bus_num] 1116 [chip->chip_select_num-1], spi->modalias); 1117 if (ret) { 1118 dev_err(&spi->dev, "peripheral_request() error\n"); 1119 goto pin_error; 1120 } 1121 } 1122 1123 bfin_spi_cs_enable(drv_data, chip); 1124 bfin_spi_cs_deactive(drv_data, chip); 1125 1126 return 0; 1127 1128 pin_error: 1129 if (chip->chip_select_num >= MAX_CTRL_CS) 1130 gpio_free(chip->cs_gpio); 1131 else 1132 peripheral_free(ssel[spi->master->bus_num] 1133 [chip->chip_select_num - 1]); 1134 error: 1135 if (chip) { 1136 if (drv_data->dma_requested) 1137 free_dma(drv_data->dma_channel); 1138 drv_data->dma_requested = 0; 1139 1140 kfree(chip); 1141 /* prevent free 'chip' twice */ 1142 spi_set_ctldata(spi, NULL); 1143 } 1144 1145 return ret; 1146} 1147 1148/* 1149 * callback for spi framework. 1150 * clean driver specific data 1151 */ 1152static void bfin_spi_cleanup(struct spi_device *spi) 1153{ 1154 struct bfin_spi_slave_data *chip = spi_get_ctldata(spi); 1155 struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); 1156 1157 if (!chip) 1158 return; 1159 1160 if (chip->chip_select_num < MAX_CTRL_CS) { 1161 peripheral_free(ssel[spi->master->bus_num] 1162 [chip->chip_select_num-1]); 1163 bfin_spi_cs_disable(drv_data, chip); 1164 } else 1165 gpio_free(chip->cs_gpio); 1166 1167 kfree(chip); 1168 /* prevent free 'chip' twice */ 1169 spi_set_ctldata(spi, NULL); 1170} 1171 1172static int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data) 1173{ 1174 INIT_LIST_HEAD(&drv_data->queue); 1175 spin_lock_init(&drv_data->lock); 1176 1177 drv_data->running = false; 1178 drv_data->busy = 0; 1179 1180 /* init transfer tasklet */ 1181 tasklet_init(&drv_data->pump_transfers, 1182 bfin_spi_pump_transfers, (unsigned long)drv_data); 1183 1184 /* init messages workqueue */ 1185 INIT_WORK(&drv_data->pump_messages, bfin_spi_pump_messages); 1186 drv_data->workqueue = create_singlethread_workqueue( 1187 dev_name(drv_data->master->dev.parent)); 1188 if (drv_data->workqueue == NULL) 1189 return -EBUSY; 1190 1191 return 0; 1192} 1193 1194static int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data) 1195{ 1196 unsigned long flags; 1197 1198 spin_lock_irqsave(&drv_data->lock, flags); 1199 1200 if (drv_data->running || drv_data->busy) { 1201 spin_unlock_irqrestore(&drv_data->lock, flags); 1202 return -EBUSY; 1203 } 1204 1205 drv_data->running = true; 1206 drv_data->cur_msg = NULL; 1207 drv_data->cur_transfer = NULL; 1208 drv_data->cur_chip = NULL; 1209 spin_unlock_irqrestore(&drv_data->lock, flags); 1210 1211 queue_work(drv_data->workqueue, &drv_data->pump_messages); 1212 1213 return 0; 1214} 1215 1216static int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data) 1217{ 1218 unsigned long flags; 1219 unsigned limit = 500; 1220 int status = 0; 1221 1222 spin_lock_irqsave(&drv_data->lock, flags); 1223 1224 /* 1225 * This is a bit lame, but is optimized for the common execution path. 1226 * A wait_queue on the drv_data->busy could be used, but then the common 1227 * execution path (pump_messages) would be required to call wake_up or 1228 * friends on every SPI message. Do this instead 1229 */ 1230 drv_data->running = false; 1231 while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) { 1232 spin_unlock_irqrestore(&drv_data->lock, flags); 1233 msleep(10); 1234 spin_lock_irqsave(&drv_data->lock, flags); 1235 } 1236 1237 if (!list_empty(&drv_data->queue) || drv_data->busy) 1238 status = -EBUSY; 1239 1240 spin_unlock_irqrestore(&drv_data->lock, flags); 1241 1242 return status; 1243} 1244 1245static int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data) 1246{ 1247 int status; 1248 1249 status = bfin_spi_stop_queue(drv_data); 1250 if (status != 0) 1251 return status; 1252 1253 destroy_workqueue(drv_data->workqueue); 1254 1255 return 0; 1256} 1257 1258static int bfin_spi_probe(struct platform_device *pdev) 1259{ 1260 struct device *dev = &pdev->dev; 1261 struct bfin5xx_spi_master *platform_info; 1262 struct spi_master *master; 1263 struct bfin_spi_master_data *drv_data; 1264 struct resource *res; 1265 int status = 0; 1266 1267 platform_info = dev_get_platdata(dev); 1268 1269 /* Allocate master with space for drv_data */ 1270 master = spi_alloc_master(dev, sizeof(*drv_data)); 1271 if (!master) { 1272 dev_err(&pdev->dev, "can not alloc spi_master\n"); 1273 return -ENOMEM; 1274 } 1275 1276 drv_data = spi_master_get_devdata(master); 1277 drv_data->master = master; 1278 drv_data->master_info = platform_info; 1279 drv_data->pdev = pdev; 1280 drv_data->pin_req = platform_info->pin_req; 1281 1282 /* the spi->mode bits supported by this driver: */ 1283 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; 1284 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); 1285 master->bus_num = pdev->id; 1286 master->num_chipselect = platform_info->num_chipselect; 1287 master->cleanup = bfin_spi_cleanup; 1288 master->setup = bfin_spi_setup; 1289 master->transfer = bfin_spi_transfer; 1290 1291 /* Find and map our resources */ 1292 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1293 if (res == NULL) { 1294 dev_err(dev, "Cannot get IORESOURCE_MEM\n"); 1295 status = -ENOENT; 1296 goto out_error_get_res; 1297 } 1298 1299 drv_data->regs = ioremap(res->start, resource_size(res)); 1300 if (drv_data->regs == NULL) { 1301 dev_err(dev, "Cannot map IO\n"); 1302 status = -ENXIO; 1303 goto out_error_ioremap; 1304 } 1305 1306 res = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1307 if (res == NULL) { 1308 dev_err(dev, "No DMA channel specified\n"); 1309 status = -ENOENT; 1310 goto out_error_free_io; 1311 } 1312 drv_data->dma_channel = res->start; 1313 1314 drv_data->spi_irq = platform_get_irq(pdev, 0); 1315 if (drv_data->spi_irq < 0) { 1316 dev_err(dev, "No spi pio irq specified\n"); 1317 status = -ENOENT; 1318 goto out_error_free_io; 1319 } 1320 1321 /* Initial and start queue */ 1322 status = bfin_spi_init_queue(drv_data); 1323 if (status != 0) { 1324 dev_err(dev, "problem initializing queue\n"); 1325 goto out_error_queue_alloc; 1326 } 1327 1328 status = bfin_spi_start_queue(drv_data); 1329 if (status != 0) { 1330 dev_err(dev, "problem starting queue\n"); 1331 goto out_error_queue_alloc; 1332 } 1333 1334 status = peripheral_request_list(drv_data->pin_req, DRV_NAME); 1335 if (status != 0) { 1336 dev_err(&pdev->dev, ": Requesting Peripherals failed\n"); 1337 goto out_error_queue_alloc; 1338 } 1339 1340 /* Reset SPI registers. If these registers were used by the boot loader, 1341 * the sky may fall on your head if you enable the dma controller. 1342 */ 1343 bfin_write(&drv_data->regs->ctl, BIT_CTL_CPHA | BIT_CTL_MASTER); 1344 bfin_write(&drv_data->regs->flg, 0xFF00); 1345 1346 /* Register with the SPI framework */ 1347 platform_set_drvdata(pdev, drv_data); 1348 status = spi_register_master(master); 1349 if (status != 0) { 1350 dev_err(dev, "problem registering spi master\n"); 1351 goto out_error_queue_alloc; 1352 } 1353 1354 dev_info(dev, "%s, Version %s, regs@%p, dma channel@%d\n", 1355 DRV_DESC, DRV_VERSION, drv_data->regs, 1356 drv_data->dma_channel); 1357 return status; 1358 1359out_error_queue_alloc: 1360 bfin_spi_destroy_queue(drv_data); 1361out_error_free_io: 1362 iounmap(drv_data->regs); 1363out_error_ioremap: 1364out_error_get_res: 1365 spi_master_put(master); 1366 1367 return status; 1368} 1369 1370/* stop hardware and remove the driver */ 1371static int bfin_spi_remove(struct platform_device *pdev) 1372{ 1373 struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); 1374 int status = 0; 1375 1376 if (!drv_data) 1377 return 0; 1378 1379 /* Remove the queue */ 1380 status = bfin_spi_destroy_queue(drv_data); 1381 if (status != 0) 1382 return status; 1383 1384 /* Disable the SSP at the peripheral and SOC level */ 1385 bfin_spi_disable(drv_data); 1386 1387 /* Release DMA */ 1388 if (drv_data->master_info->enable_dma) { 1389 if (dma_channel_active(drv_data->dma_channel)) 1390 free_dma(drv_data->dma_channel); 1391 } 1392 1393 if (drv_data->irq_requested) { 1394 free_irq(drv_data->spi_irq, drv_data); 1395 drv_data->irq_requested = 0; 1396 } 1397 1398 /* Disconnect from the SPI framework */ 1399 spi_unregister_master(drv_data->master); 1400 1401 peripheral_free_list(drv_data->pin_req); 1402 1403 return 0; 1404} 1405 1406#ifdef CONFIG_PM_SLEEP 1407static int bfin_spi_suspend(struct device *dev) 1408{ 1409 struct bfin_spi_master_data *drv_data = dev_get_drvdata(dev); 1410 int status = 0; 1411 1412 status = bfin_spi_stop_queue(drv_data); 1413 if (status != 0) 1414 return status; 1415 1416 drv_data->ctrl_reg = bfin_read(&drv_data->regs->ctl); 1417 drv_data->flag_reg = bfin_read(&drv_data->regs->flg); 1418 1419 /* 1420 * reset SPI_CTL and SPI_FLG registers 1421 */ 1422 bfin_write(&drv_data->regs->ctl, BIT_CTL_CPHA | BIT_CTL_MASTER); 1423 bfin_write(&drv_data->regs->flg, 0xFF00); 1424 1425 return 0; 1426} 1427 1428static int bfin_spi_resume(struct device *dev) 1429{ 1430 struct bfin_spi_master_data *drv_data = dev_get_drvdata(dev); 1431 int status = 0; 1432 1433 bfin_write(&drv_data->regs->ctl, drv_data->ctrl_reg); 1434 bfin_write(&drv_data->regs->flg, drv_data->flag_reg); 1435 1436 /* Start the queue running */ 1437 status = bfin_spi_start_queue(drv_data); 1438 if (status != 0) { 1439 dev_err(dev, "problem starting queue (%d)\n", status); 1440 return status; 1441 } 1442 1443 return 0; 1444} 1445 1446static SIMPLE_DEV_PM_OPS(bfin_spi_pm_ops, bfin_spi_suspend, bfin_spi_resume); 1447 1448#define BFIN_SPI_PM_OPS (&bfin_spi_pm_ops) 1449#else 1450#define BFIN_SPI_PM_OPS NULL 1451#endif 1452 1453MODULE_ALIAS("platform:bfin-spi"); 1454static struct platform_driver bfin_spi_driver = { 1455 .driver = { 1456 .name = DRV_NAME, 1457 .pm = BFIN_SPI_PM_OPS, 1458 }, 1459 .probe = bfin_spi_probe, 1460 .remove = bfin_spi_remove, 1461}; 1462 1463static int __init bfin_spi_init(void) 1464{ 1465 return platform_driver_register(&bfin_spi_driver); 1466} 1467subsys_initcall(bfin_spi_init); 1468 1469static void __exit bfin_spi_exit(void) 1470{ 1471 platform_driver_unregister(&bfin_spi_driver); 1472} 1473module_exit(bfin_spi_exit); 1474