/*
 * drivers/ata/sata_dwc_460ex.c
 *
 * Synopsys DesignWare Cores (DWC) SATA host driver
 *
 * Author: Mark Miesfeld <mmiesfeld@amcc.com>
 *
 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
 * Copyright 2008 DENX Software Engineering
 *
 * Based on versions provided by AMCC and Synopsys which are:
 *   Copyright 2006 Applied Micro Circuits Corporation
 *   COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#ifdef CONFIG_SATA_DWC_DEBUG
#define DEBUG
#endif

#ifdef CONFIG_SATA_DWC_VDEBUG
#define VERBOSE_DEBUG
#define DEBUG_NCQ
#endif

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/slab.h>

#include "libata.h"

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

/* Supported DMA engine drivers */
#include <linux/platform_data/dma-dw.h>
#include <linux/dma/dw.h>

/* These two are defined in "libata.h" */
#undef DRV_NAME
#undef DRV_VERSION

#define DRV_NAME	"sata-dwc"
#define DRV_VERSION	"1.3"

#ifndef out_le32
#define out_le32(a, v)	__raw_writel(__cpu_to_le32(v), (void __iomem *)(a))
#endif

#ifndef in_le32
#define in_le32(a)	__le32_to_cpu(__raw_readl((void __iomem *)(a)))
#endif

#ifndef NO_IRQ
#define NO_IRQ		0
#endif

#define AHB_DMA_BRST_DFLT	64	/* 16 data items burst length */

enum {
	SATA_DWC_MAX_PORTS = 1,

	SATA_DWC_SCR_OFFSET = 0x24,
	SATA_DWC_REG_OFFSET = 0x64,
};

/* DWC SATA Registers */
struct sata_dwc_regs {
	u32 fptagr;		/* 1st party DMA tag */
	u32 fpbor;		/* 1st party DMA buffer offset */
	u32 fptcr;		/* 1st party DMA Xfr count */
	u32 dmacr;		/* DMA Control */
	u32 dbtsr;		/* DMA Burst Transac size */
	u32 intpr;		/* Interrupt Pending */
	u32 intmr;		/* Interrupt Mask */
	u32 errmr;		/* Error Mask */
	u32 llcr;		/* Link Layer Control */
	u32 phycr;		/* PHY Control */
	u32 physr;		/* PHY Status */
	u32 rxbistpd;		/* Recvd BIST pattern def register */
	u32 rxbistpd1;		/* Recvd BIST data dword1 */
	u32 rxbistpd2;		/* Recvd BIST pattern data dword2 */
	u32 txbistpd;		/* Trans BIST pattern def register */
	u32 txbistpd1;		/* Trans BIST data dword1 */
	u32 txbistpd2;		/* Trans BIST data dword2 */
	u32 bistcr;		/* BIST Control Register */
	u32 bistfctr;		/* BIST FIS Count Register */
	u32 bistsr;		/* BIST Status Register */
	u32 bistdecr;		/* BIST Dword Error count register */
	u32 res[15];		/* Reserved locations */
	u32 testr;		/* Test Register */
	u32 versionr;		/* Version Register */
	u32 idr;		/* ID Register */
	u32 unimpl[192];	/* Unimplemented */
	u32 dmadr[256];		/* FIFO Locations in DMA Mode */
};

enum {
	SCR_SCONTROL_DET_ENABLE	= 0x00000001,
	SCR_SSTATUS_DET_PRESENT	= 0x00000001,
	SCR_SERROR_DIAG_X	= 0x04000000,
	/* DWC SATA Register Operations */
	SATA_DWC_TXFIFO_DEPTH	= 0x01FF,
	SATA_DWC_RXFIFO_DEPTH	= 0x01FF,
	SATA_DWC_DMACR_TMOD_TXCHEN = 0x00000004,
	SATA_DWC_DMACR_TXCHEN	= (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_RXCHEN	= (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_TXRXCH_CLEAR = SATA_DWC_DMACR_TMOD_TXCHEN,
	SATA_DWC_INTPR_DMAT	= 0x00000001,
	SATA_DWC_INTPR_NEWFP	= 0x00000002,
	SATA_DWC_INTPR_PMABRT	= 0x00000004,
	SATA_DWC_INTPR_ERR	= 0x00000008,
	SATA_DWC_INTPR_NEWBIST	= 0x00000010,
	SATA_DWC_INTPR_IPF	= 0x10000000,
	SATA_DWC_INTMR_DMATM	= 0x00000001,
	SATA_DWC_INTMR_NEWFPM	= 0x00000002,
	SATA_DWC_INTMR_PMABRTM	= 0x00000004,
	SATA_DWC_INTMR_ERRM	= 0x00000008,
	SATA_DWC_INTMR_NEWBISTM	= 0x00000010,
	SATA_DWC_LLCR_SCRAMEN	= 0x00000001,
	SATA_DWC_LLCR_DESCRAMEN	= 0x00000002,
	SATA_DWC_LLCR_RPDEN	= 0x00000004,
	/* These are all the error bits; zeros are reserved fields. */
	SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03
};

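/*
 * Worked example for the burst-size macros below: with the default
 * AHB_DMA_BRST_DFLT of 64 bytes, SATA_DWC_DBTSR_MWR(64) = 64/4 = 16
 * words in DBTSR[8:0], and SATA_DWC_DBTSR_MRD(64) places the same 16
 * in DBTSR[24:16], i.e. the "16 data items" burst noted above.
 */
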
#define SATA_DWC_SCR0_SPD_GET(v)	(((v) >> 4) & 0x0000000F)
#define SATA_DWC_DMACR_TX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_TXCHEN) | \
					 SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DMACR_RX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_RXCHEN) | \
					 SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DBTSR_MWR(size)	(((size)/4) & SATA_DWC_TXFIFO_DEPTH)
#define SATA_DWC_DBTSR_MRD(size)	((((size)/4) & SATA_DWC_RXFIFO_DEPTH) \
					 << 16)

struct sata_dwc_device {
	struct device		*dev;		/* generic device struct */
	struct ata_probe_ent	*pe;		/* ptr to probe-ent */
	struct ata_host		*host;
	u8 __iomem		*reg_base;
	struct sata_dwc_regs	*sata_dwc_regs;	/* DW Synopsys SATA specific */
	struct dw_dma_chip	*dma;
};

#define SATA_DWC_QCMD_MAX	32

struct sata_dwc_device_port {
	struct sata_dwc_device	*hsdev;
	int			cmd_issued[SATA_DWC_QCMD_MAX];
	int			dma_pending[SATA_DWC_QCMD_MAX];

	/* DMA info */
	struct dw_dma_slave		*dws;
	struct dma_chan			*chan;
	struct dma_async_tx_descriptor	*desc[SATA_DWC_QCMD_MAX];
	u32				dma_interrupt_count;
};

/*
 * Commonly used DWC SATA driver macros
 */
#define HSDEV_FROM_HOST(host)	((struct sata_dwc_device *)(host)->private_data)
#define HSDEV_FROM_AP(ap)	((struct sata_dwc_device *)(ap)->host->private_data)
#define HSDEVP_FROM_AP(ap)	((struct sata_dwc_device_port *)(ap)->private_data)
#define HSDEV_FROM_QC(qc)	((struct sata_dwc_device *)(qc)->ap->host->private_data)
#define HSDEV_FROM_HSDEVP(p)	((struct sata_dwc_device *)(p)->hsdev)

enum {
	SATA_DWC_CMD_ISSUED_NOT		= 0,
	SATA_DWC_CMD_ISSUED_PEND	= 1,
	SATA_DWC_CMD_ISSUED_EXEC	= 2,
	SATA_DWC_CMD_ISSUED_NODATA	= 3,

	SATA_DWC_DMA_PENDING_NONE	= 0,
	SATA_DWC_DMA_PENDING_TX		= 1,
	SATA_DWC_DMA_PENDING_RX		= 2,
};

struct sata_dwc_host_priv {
	void __iomem *scr_addr_sstatus;
	u32 sata_dwc_sactive_issued;
	u32 sata_dwc_sactive_queued;
};

static struct sata_dwc_host_priv host_pvt;

static struct dw_dma_slave sata_dwc_dma_dws = {
	.src_id = 0,
	.dst_id = 0,
	.src_master = 0,
	.dst_master = 1,
};

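/*
 * The request line and master numbers above match the SATA wiring on
 * the 460EX; other integrations of the DWC SATA core would likely need
 * different dw_dma_slave parameters here.
 */
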
/*
 * Prototypes
 */
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
				u32 check_status);
static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
static void sata_dwc_port_stop(struct ata_port *ap);
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);

static const char *get_prot_descript(u8 protocol)
{
	switch ((enum ata_tf_protocols)protocol) {
	case ATA_PROT_NODATA:
		return "ATA no data";
	case ATA_PROT_PIO:
		return "ATA PIO";
	case ATA_PROT_DMA:
		return "ATA DMA";
	case ATA_PROT_NCQ:
		return "ATA NCQ";
	case ATAPI_PROT_NODATA:
		return "ATAPI no data";
	case ATAPI_PROT_PIO:
		return "ATAPI PIO";
	case ATAPI_PROT_DMA:
		return "ATAPI DMA";
	default:
		return "unknown";
	}
}

static const char *get_dma_dir_descript(int dma_dir)
{
	switch ((enum dma_data_direction)dma_dir) {
	case DMA_BIDIRECTIONAL:
		return "bidirectional";
	case DMA_TO_DEVICE:
		return "to device";
	case DMA_FROM_DEVICE:
		return "from device";
	default:
		return "none";
	}
}

static void sata_dwc_tf_dump(struct ata_port *ap, struct ata_taskfile *tf)
{
	dev_vdbg(ap->dev,
		"taskfile cmd: 0x%02x protocol: %s flags: 0x%lx device: %x\n",
		tf->command, get_prot_descript(tf->protocol), tf->flags,
		tf->device);
	dev_vdbg(ap->dev,
		"feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam: 0x%x lbah: 0x%x\n",
		tf->feature, tf->nsect, tf->lbal, tf->lbam, tf->lbah);
	dev_vdbg(ap->dev,
		"hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n",
		tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
		tf->hob_lbah);
}

static void dma_dwc_xfer_done(void *hsdev_instance)
{
	unsigned long flags;
	struct sata_dwc_device *hsdev = hsdev_instance;
	struct ata_host *host = (struct ata_host *)hsdev->host;
	struct ata_port *ap;
	struct sata_dwc_device_port *hsdevp;
	u8 tag = 0;
	unsigned int port = 0;

	spin_lock_irqsave(&host->lock, flags);
	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);
	tag = ap->link.active_tag;

	/*
	 * Each DMA command produces 2 interrupts. Only
	 * complete the command after both interrupts have been
	 * seen. (See sata_dwc_isr())
	 */
	hsdevp->dma_interrupt_count++;
	sata_dwc_clear_dmacr(hsdevp, tag);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
		dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n",
			tag, hsdevp->dma_pending[tag]);
	}

	if ((hsdevp->dma_interrupt_count % 2) == 0)
		sata_dwc_dma_xfer_complete(ap, 1);

	spin_unlock_irqrestore(&host->lock, flags);
}

static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	dma_addr_t addr = (dma_addr_t)&hsdev->sata_dwc_regs->dmadr;
	struct dma_slave_config sconf;
	struct dma_async_tx_descriptor *desc;

	if (qc->dma_dir == DMA_DEV_TO_MEM) {
		sconf.src_addr = addr;
		sconf.device_fc = true;
	} else {	/* DMA_MEM_TO_DEV */
		sconf.dst_addr = addr;
		sconf.device_fc = false;
	}

	sconf.direction = qc->dma_dir;
	sconf.src_maxburst = AHB_DMA_BRST_DFLT;
	sconf.dst_maxburst = AHB_DMA_BRST_DFLT;
	sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dmaengine_slave_config(hsdevp->chan, &sconf);

	/* Convert SG list to linked list of items (LLIs) for AHB DMA */
	desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem,
				       qc->dma_dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc)
		return NULL;

	desc->callback = dma_dwc_xfer_done;
	desc->callback_param = hsdev;

	dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pad\n",
		__func__, qc->sg, qc->n_elem, &addr);

	return desc;
}

static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	if (scr > SCR_NOTIFICATION) {
		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
			__func__, scr);
		return -EINVAL;
	}

	*val = in_le32(link->ap->ioaddr.scr_addr + (scr * 4));
	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n",
		__func__, link->ap->print_id, scr, *val);

	return 0;
}

static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n",
		__func__, link->ap->print_id, scr, val);
	if (scr > SCR_NOTIFICATION) {
		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
			__func__, scr);
		return -EINVAL;
	}
	out_le32(link->ap->ioaddr.scr_addr + (scr * 4), val);

	return 0;
}

static u32 core_scr_read(unsigned int scr)
{
	return in_le32(host_pvt.scr_addr_sstatus + (scr * 4));
}

static void core_scr_write(unsigned int scr, u32 val)
{
	out_le32(host_pvt.scr_addr_sstatus + (scr * 4), val);
}

static void clear_serror(void)
{
	u32 val;

	val = core_scr_read(SCR_ERROR);
	core_scr_write(SCR_ERROR, val);
}

static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
{
	/*
	 * Ack by writing the pending register's contents back to itself;
	 * note this acknowledges every currently pending bit, not just @bit.
	 */
	out_le32(&hsdev->sata_dwc_regs->intpr,
		 in_le32(&hsdev->sata_dwc_regs->intpr));
}

static u32 qcmd_tag_to_mask(u8 tag)
{
	return 0x00000001 << (tag & 0x1f);
}

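/*
 * Example: qcmd_tag_to_mask(5) == 0x00000020. The tag is masked to
 * 0..31, matching the SATA_DWC_QCMD_MAX per-tag bookkeeping arrays.
 */
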
/* See ahci.c */
static void sata_dwc_error_intr(struct ata_port *ap,
				struct sata_dwc_device *hsdev, uint intpr)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned int err_mask = 0, action = 0;
	struct ata_queued_cmd *qc;
	u32 serror;
	u8 status, tag;

	ata_ehi_clear_desc(ehi);

	serror = core_scr_read(SCR_ERROR);
	status = ap->ops->sff_check_status(ap);

	tag = ap->link.active_tag;

	dev_err(ap->dev,
		"%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d\n",
		__func__, serror, intpr, status, hsdevp->dma_interrupt_count,
		hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);

	/* Clear error register and interrupt bit */
	clear_serror();
	clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);

	/* This is the only error happening now. TODO: check for exact error */
	err_mask |= AC_ERR_HOST_BUS;
	action |= ATA_EH_RESET;

	/* Pass this on to EH */
	ehi->serror |= serror;
	ehi->action |= action;

	qc = ata_qc_from_tag(ap, tag);
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	ata_port_abort(ap);
}

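/*
 * sata_dwc_isr() below distinguishes three cases: an error interrupt,
 * a DMA SETUP FIS (NEWFP) starting first-party DMA for a queued
 * command, and command completion. NCQ completions are recovered by
 * diffing SActive against the tags this driver has issued.
 */
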
/*
 * Function : sata_dwc_isr
 * arguments : irq, void *dev_instance, struct pt_regs *regs
 * Return value : irqreturn_t - status of IRQ
 * This interrupt handler is called via the port ops registered function:
 * .irq_handler = sata_dwc_isr
 */
static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
{
	struct ata_host *host = (struct ata_host *)dev_instance;
	struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status, tag;
	int handled, num_processed, port = 0;
	uint intpr, sactive, sactive2, tag_mask;
	struct sata_dwc_device_port *hsdevp;

	host_pvt.sata_dwc_sactive_issued = 0;

	spin_lock_irqsave(&host->lock, flags);

	/* Read the interrupt register */
	intpr = in_le32(&hsdev->sata_dwc_regs->intpr);

	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);

	dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
		ap->link.active_tag);

	/* Check for error interrupt */
	if (intpr & SATA_DWC_INTPR_ERR) {
		sata_dwc_error_intr(ap, hsdev, intpr);
		handled = 1;
		goto DONE;
	}

	/* Check for DMA SETUP FIS (FP DMA) interrupt */
	if (intpr & SATA_DWC_INTPR_NEWFP) {
		clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);

		tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr));
		dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
		if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
			dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);

		host_pvt.sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag);

		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc for tag=%d\n", tag);
			handled = 1;
			goto DONE;
		}
		/*
		 * Start FP DMA for NCQ command. At this point the tag is the
		 * active tag. It is the tag that matches the command about to
		 * be completed.
		 */
		qc->ap->link.active_tag = tag;
		sata_dwc_bmdma_start_by_tag(qc, tag);

		handled = 1;
		goto DONE;
	}

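	/*
	 * (sactive_issued | sactive) ^ sactive isolates the bits that are
	 * set in sata_dwc_sactive_issued but already clear in SActive,
	 * i.e. the queued commands the device has completed since they
	 * were issued.
	 */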
	sactive = core_scr_read(SCR_ACTIVE);
	tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;

	/* If no sactive issued and tag_mask is zero then this is not NCQ */
	if (host_pvt.sata_dwc_sactive_issued == 0 && tag_mask == 0) {
		if (ap->link.active_tag == ATA_TAG_POISON)
			tag = 0;
		else
			tag = ap->link.active_tag;
		qc = ata_qc_from_tag(ap, tag);

		/* DEV interrupt w/ no active qc? */
		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			dev_err(ap->dev,
				"%s interrupt with no active qc qc=%p\n",
				__func__, qc);
			ap->ops->sff_check_status(ap);
			handled = 1;
			goto DONE;
		}
		status = ap->ops->sff_check_status(ap);

		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
			sata_dwc_qc_complete(ap, qc, 1);
			handled = 1;
			goto DONE;
		}

		dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
			__func__, get_prot_descript(qc->tf.protocol));
DRVSTILLBUSY:
		if (ata_is_dma(qc->tf.protocol)) {
			/*
			 * Each DMA transaction produces 2 interrupts. The DMAC
			 * transfer complete interrupt and the SATA controller
			 * operation done interrupt. The command should be
			 * completed only after both interrupts are seen.
			 */
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] ==
			    SATA_DWC_DMA_PENDING_NONE) {
				dev_err(ap->dev,
					"%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n",
					__func__, intpr, status,
					hsdevp->dma_pending[tag]);
			}

			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap, 1);
		} else if (ata_is_pio(qc->tf.protocol)) {
			ata_sff_hsm_move(ap, qc, status, 0);
			handled = 1;
			goto DONE;
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
				goto DRVSTILLBUSY;
		}

		handled = 1;
		goto DONE;
	}

	/*
	 * This is an NCQ command. At this point we need to figure out for
	 * which tags we have gotten a completion interrupt. One interrupt
	 * may serve as completion for more than one operation when commands
	 * are queued (NCQ). We need to process each completed command.
	 */

	/* process completed commands */
	sactive = core_scr_read(SCR_ACTIVE);
	tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;

	if (sactive != 0 || host_pvt.sata_dwc_sactive_issued > 1 ||
	    tag_mask > 1) {
		dev_dbg(ap->dev,
			"%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
			__func__, sactive, host_pvt.sata_dwc_sactive_issued,
			tag_mask);
	}

	if ((tag_mask | host_pvt.sata_dwc_sactive_issued) !=
	    host_pvt.sata_dwc_sactive_issued) {
		dev_warn(ap->dev,
			 "Bad tag mask? sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
			 sactive, host_pvt.sata_dwc_sactive_issued, tag_mask);
	}

	/* read just to clear ... not bad if currently still busy */
	status = ap->ops->sff_check_status(ap);
	dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);

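	/*
	 * Retire completed tags from LSB to MSB; e.g. tag_mask == 0x14
	 * completes tag 2 first and then tag 4. tag and tag_mask advance
	 * together, so tag always names the bit currently being cleared.
	 */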
	tag = 0;
	num_processed = 0;
	while (tag_mask) {
		num_processed++;
		/* find the next completed tag (lowest set bit first) */
		while (!(tag_mask & 0x00000001)) {
			tag++;
			tag_mask >>= 1;
		}

		tag_mask &= (~0x00000001);
		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc for tag=%d\n", tag);
			handled = 1;
			goto DONE;
		}

		/* To be picked up by completion functions */
		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		/* Let libata/scsi layers handle error */
		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
				status);
			sata_dwc_qc_complete(ap, qc, 1);
			handled = 1;
			goto DONE;
		}

		/* Process completed command */
		dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
			get_prot_descript(qc->tf.protocol));
		if (ata_is_dma(qc->tf.protocol)) {
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] ==
			    SATA_DWC_DMA_PENDING_NONE)
				dev_warn(ap->dev, "%s: DMA not pending?\n",
					 __func__);
			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap, 1);
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
				goto STILLBUSY;
		}
		continue;

STILLBUSY:
		ap->stats.idle_irq++;
		dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
			 ap->print_id);
	} /* while tag_mask */

	/*
	 * Check to see if any commands completed while we were processing
	 * our initial set of completed commands (reading the status clears
	 * the interrupt, so we might miss a completed command interrupt if
	 * one came in while we were processing; we read status as part of
	 * processing a completed command).
	 */
	sactive2 = core_scr_read(SCR_ACTIVE);
	if (sactive2 != sactive) {
		dev_dbg(ap->dev,
			"More completed - sactive=0x%x sactive2=0x%x\n",
			sactive, sactive2);
	}
	handled = 1;

DONE:
	spin_unlock_irqrestore(&host->lock, flags);
	return IRQ_RETVAL(handled);
}

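/*
 * After each transfer the active channel-enable bit must be dropped
 * from dmacr while TMOD_TXCHEN stays set; sata_dwc_clear_dmacr() below
 * picks the TX or RX clear based on which direction was pending.
 */
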
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
		out_le32(&(hsdev->sata_dwc_regs->dmacr),
			 SATA_DWC_DMACR_RX_CLEAR(
				in_le32(&(hsdev->sata_dwc_regs->dmacr))));
	} else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
		out_le32(&(hsdev->sata_dwc_regs->dmacr),
			 SATA_DWC_DMACR_TX_CLEAR(
				in_le32(&(hsdev->sata_dwc_regs->dmacr))));
	} else {
		/*
		 * This should not happen; it indicates the driver is out of
		 * sync. If it does happen, clear dmacr anyway.
		 */
		dev_err(hsdev->dev,
			"%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
			__func__, tag, hsdevp->dma_pending[tag],
			in_le32(&hsdev->sata_dwc_regs->dmacr));
		out_le32(&(hsdev->sata_dwc_regs->dmacr),
			 SATA_DWC_DMACR_TXRXCH_CLEAR);
	}
}

static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
{
	struct ata_queued_cmd *qc;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	u8 tag = 0;

	tag = ap->link.active_tag;
	qc = ata_qc_from_tag(ap, tag);
	if (!qc) {
		dev_err(ap->dev, "failed to get qc\n");
		return;
	}

#ifdef DEBUG_NCQ
	if (tag > 0) {
		dev_info(ap->dev,
			 "%s tag=%u cmd=0x%02x dma dir=%s proto=%s dmacr=0x%08x\n",
			 __func__, qc->tag, qc->tf.command,
			 get_dma_dir_descript(qc->dma_dir),
			 get_prot_descript(qc->tf.protocol),
			 in_le32(&(hsdev->sata_dwc_regs->dmacr)));
	}
#endif

	if (ata_is_dma(qc->tf.protocol)) {
		if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
			dev_err(ap->dev,
				"%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
				__func__,
				in_le32(&(hsdev->sata_dwc_regs->dmacr)));
		}

		hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
		sata_dwc_qc_complete(ap, qc, check_status);
		ap->link.active_tag = ATA_TAG_POISON;
	} else {
		sata_dwc_qc_complete(ap, qc, check_status);
	}
}

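/*
 * Completion drops the tag from both host-private masks below;
 * sata_dwc_sactive_issued is the set that sata_dwc_isr() diffs against
 * SActive, so clearing it here keeps the tag from being retired twice.
 */
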
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
				u32 check_status)
{
	u8 status = 0;
	u32 mask = 0x0;
	u8 tag = qc->tag;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	host_pvt.sata_dwc_sactive_queued = 0;
	dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
		dev_err(ap->dev, "TX DMA PENDING\n");
	else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
		dev_err(ap->dev, "RX DMA PENDING\n");
	dev_dbg(ap->dev,
		"QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n",
		qc->tf.command, status, ap->print_id, qc->tf.protocol);

	/* clear active bit */
	mask = ~qcmd_tag_to_mask(tag);
	host_pvt.sata_dwc_sactive_queued = host_pvt.sata_dwc_sactive_queued & mask;
	host_pvt.sata_dwc_sactive_issued = host_pvt.sata_dwc_sactive_issued & mask;
	ata_qc_complete(qc);
	return 0;
}

static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
{
	/* Enable selective interrupts by setting the interrupt mask register */
	out_le32(&hsdev->sata_dwc_regs->intmr,
		 SATA_DWC_INTMR_ERRM |
		 SATA_DWC_INTMR_NEWFPM |
		 SATA_DWC_INTMR_PMABRTM |
		 SATA_DWC_INTMR_DMATM);

	/*
	 * Unmask the error bits that should trigger an error interrupt by
	 * setting the error mask register.
	 */
	out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);

	dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
		__func__, in_le32(&hsdev->sata_dwc_regs->intmr),
		in_le32(&hsdev->sata_dwc_regs->errmr));
}

static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
{
	struct sata_dwc_device_port *hsdevp = param;
	struct dw_dma_slave *dws = hsdevp->dws;

	if (dws->dma_dev != chan->device->dev)
		return false;

	chan->private = dws;
	return true;
}

static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
{
	port->cmd_addr		= (void __iomem *)base + 0x00;
	port->data_addr		= (void __iomem *)base + 0x00;

	port->error_addr	= (void __iomem *)base + 0x04;
	port->feature_addr	= (void __iomem *)base + 0x04;

	port->nsect_addr	= (void __iomem *)base + 0x08;

	port->lbal_addr		= (void __iomem *)base + 0x0c;
	port->lbam_addr		= (void __iomem *)base + 0x10;
	port->lbah_addr		= (void __iomem *)base + 0x14;

	port->device_addr	= (void __iomem *)base + 0x18;
	port->command_addr	= (void __iomem *)base + 0x1c;
	port->status_addr	= (void __iomem *)base + 0x1c;

	port->altstatus_addr	= (void __iomem *)base + 0x20;
	port->ctl_addr		= (void __iomem *)base + 0x20;
}

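/*
 * The DWC core exposes the SFF shadow registers at consecutive 32-bit
 * offsets from the port base, which is why every taskfile address
 * above advances in steps of 4.
 */
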
/*
 * Function : sata_dwc_port_start
 * arguments : struct ata_port *ap
 * Return value : returns 0 if success, error code otherwise
 * This function allocates the scatter gather LLI table for AHB DMA
 */
static int sata_dwc_port_start(struct ata_port *ap)
{
	int err = 0;
	struct sata_dwc_device *hsdev;
	struct sata_dwc_device_port *hsdevp = NULL;
	struct device *pdev;
	dma_cap_mask_t mask;
	int i;

	hsdev = HSDEV_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);

	hsdev->host = ap->host;
	pdev = ap->host->dev;
	if (!pdev) {
		dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
		err = -ENODEV;
		goto CLEANUP;
	}

	/* Allocate Port Struct */
	hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
	if (!hsdevp) {
		dev_err(ap->dev, "%s: kzalloc failed for hsdevp\n", __func__);
		err = -ENOMEM;
		goto CLEANUP;
	}
	hsdevp->hsdev = hsdev;

	hsdevp->dws = &sata_dwc_dma_dws;
	hsdevp->dws->dma_dev = hsdev->dev;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Acquire DMA channel */
	hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
	if (!hsdevp->chan) {
		dev_err(hsdev->dev, "%s: dma channel unavailable\n", __func__);
		err = -EAGAIN;
		goto CLEANUP_ALLOC;
	}

	for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
		hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;

	ap->bmdma_prd = NULL;	/* set these so libata doesn't use them */
	ap->bmdma_prd_dma = 0;

	if (ap->port_no == 0) {
		dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
			__func__);
		out_le32(&hsdev->sata_dwc_regs->dmacr,
			 SATA_DWC_DMACR_TXRXCH_CLEAR);

		dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
			__func__);
		out_le32(&hsdev->sata_dwc_regs->dbtsr,
			 (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
			  SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
	}

	/* Clear any error bits before libata starts issuing commands */
	clear_serror();
	ap->private_data = hsdevp;
	dev_dbg(ap->dev, "%s: done\n", __func__);
	return 0;

CLEANUP_ALLOC:
	kfree(hsdevp);
CLEANUP:
	dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
	return err;
}

static void sata_dwc_port_stop(struct ata_port *ap)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);

	dmaengine_terminate_all(hsdevp->chan);
	dma_release_channel(hsdevp->chan);

	kfree(hsdevp);
	ap->private_data = NULL;
}

/*
 * Function : sata_dwc_exec_command_by_tag
 * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
 * Return value : None
 * This function keeps track of individual command tag ids and calls
 * ata_exec_command in libata
 */
static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
					 struct ata_taskfile *tf,
					 u8 tag, u32 cmd_issued)
{
	unsigned long flags;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
		ata_get_cmd_descript(tf->command), tag);

	spin_lock_irqsave(&ap->host->lock, flags);
	hsdevp->cmd_issued[tag] = cmd_issued;
	spin_unlock_irqrestore(&ap->host->lock, flags);

	/*
	 * Clear SError before executing a new command.
	 * sata_dwc_scr_write and read cannot be used here. Clearing the PM
	 * managed SError register for the disk needs to be done before the
	 * task file is loaded.
	 */
	clear_serror();
	ata_sff_exec_command(ap, tf);
}

static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
				     SATA_DWC_CMD_ISSUED_PEND);
}

static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
{
	u8 tag = qc->tag;

	if (ata_is_ncq(qc->tf.protocol)) {
		dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
			__func__, qc->ap->link.sactive, tag);
	} else {
		tag = 0;
	}
	sata_dwc_bmdma_setup_by_tag(qc, tag);
}

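/*
 * Non-NCQ commands always run under tag 0; only NCQ protocols carry
 * their libata tag through to the per-tag bookkeeping (cmd_issued[],
 * dma_pending[] and desc[]).
 */
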
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	int start_dma;
	u32 reg;
	struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct dma_async_tx_descriptor *desc = hsdevp->desc[tag];
	int dir = qc->dma_dir;

	if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
		start_dma = 1;
		if (dir == DMA_TO_DEVICE)
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
		else
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
	} else {
		dev_err(ap->dev,
			"%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n",
			__func__, hsdevp->cmd_issued[tag], tag);
		start_dma = 0;
	}

	dev_dbg(ap->dev,
		"%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s start_dma? %x\n",
		__func__, qc, tag, qc->tf.command,
		get_dma_dir_descript(qc->dma_dir), start_dma);
	sata_dwc_tf_dump(ap, &qc->tf);

	if (start_dma) {
		reg = core_scr_read(SCR_ERROR);
		if (reg & SATA_DWC_SERROR_ERR_BITS) {
			dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
				__func__, reg);
		}

		if (dir == DMA_TO_DEVICE)
			out_le32(&hsdev->sata_dwc_regs->dmacr,
				 SATA_DWC_DMACR_TXCHEN);
		else
			out_le32(&hsdev->sata_dwc_regs->dmacr,
				 SATA_DWC_DMACR_RXCHEN);

		/* Enable AHB DMA transfer on the specified channel */
		dmaengine_submit(desc);
		dma_async_issue_pending(hsdevp->chan);
	}
}

static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
{
	u8 tag = qc->tag;

	if (ata_is_ncq(qc->tf.protocol)) {
		dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
			__func__, qc->ap->link.sactive, tag);
	} else {
		tag = 0;
	}
	dev_dbg(qc->ap->dev, "%s\n", __func__);
	sata_dwc_bmdma_start_by_tag(qc, tag);
}

/*
 * Function : sata_dwc_qc_prep_by_tag
 * arguments : ata_queued_cmd *qc, u8 tag
 * Return value : None
 * qc_prep for a particular queued command based on tag
 */
static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	struct dma_async_tx_descriptor *desc;
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
		__func__, ap->port_no, get_dma_dir_descript(qc->dma_dir),
		qc->n_elem);

	desc = dma_dwc_xfer_setup(qc);
	if (!desc) {
		dev_err(ap->dev, "%s: dma_dwc_xfer_setup returned NULL\n",
			__func__);
		return;
	}
	hsdevp->desc[tag] = desc;
}

static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
{
	u32 sactive;
	u8 tag = qc->tag;
	struct ata_port *ap = qc->ap;

#ifdef DEBUG_NCQ
	if (qc->tag > 0 || ap->link.sactive > 1)
		dev_info(ap->dev,
			 "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
			 __func__, ap->print_id, qc->tf.command,
			 ata_get_cmd_descript(qc->tf.command),
			 qc->tag, get_prot_descript(qc->tf.protocol),
			 ap->link.active_tag, ap->link.sactive);
#endif

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;
	sata_dwc_qc_prep_by_tag(qc, tag);

	if (ata_is_ncq(qc->tf.protocol)) {
		sactive = core_scr_read(SCR_ACTIVE);
		sactive |= (0x00000001 << tag);
		core_scr_write(SCR_ACTIVE, sactive);

		dev_dbg(qc->ap->dev,
			"%s: tag=%d ap->link.sactive=0x%08x sactive=0x%08x\n",
			__func__, tag, qc->ap->link.sactive, sactive);

		ap->ops->sff_tf_load(ap, &qc->tf);
		sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag,
					     SATA_DWC_CMD_ISSUED_PEND);
	} else {
		ata_sff_qc_issue(qc);
	}
	return 0;
}

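/*
 * For NCQ, sata_dwc_qc_issue() above sets the tag's SActive bit before
 * loading the taskfile; the device later responds with a DMA SETUP FIS
 * that surfaces as the NEWFP interrupt handled in sata_dwc_isr().
 */
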
/*
 * Function : sata_dwc_qc_prep
 * arguments : ata_queued_cmd *qc
 * Return value : None
 * qc_prep for a particular queued command
 */
static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
{
	if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
		return;

#ifdef DEBUG_NCQ
	if (qc->tag > 0)
		dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
			 __func__, qc->tag, qc->ap->link.active_tag);
#endif
}

static void sata_dwc_error_handler(struct ata_port *ap)
{
	ata_sff_error_handler(ap);
}

static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
	int ret;

	ret = sata_sff_hardreset(link, class, deadline);

	sata_dwc_enable_interrupts(hsdev);

	/* Reconfigure the DMA control register */
	out_le32(&hsdev->sata_dwc_regs->dmacr,
		 SATA_DWC_DMACR_TXRXCH_CLEAR);

	/* Reconfigure the DMA Burst Transaction Size register */
	out_le32(&hsdev->sata_dwc_regs->dbtsr,
		 SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
		 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));

	return ret;
}

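/*
 * After the SFF hardreset above, the interrupt masks, dmacr and dbtsr
 * are reprogrammed with the same defaults sata_dwc_port_start() uses,
 * rather than assuming the controller state survived the reset.
 */
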
/*
 * scsi mid-layer and libata interface structures
 */
static struct scsi_host_template sata_dwc_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	/*
	 * test-only: Currently this driver doesn't handle NCQ
	 * correctly. We enable NCQ but set the queue depth to a
	 * max of 1. This will get fixed in a future release.
	 */
	.sg_tablesize		= LIBATA_MAX_PRD,
	/* .can_queue		= ATA_MAX_QUEUE, */
	.dma_boundary		= ATA_DMA_BOUNDARY,
};

static struct ata_port_operations sata_dwc_ops = {
	.inherits		= &ata_sff_port_ops,

	.error_handler		= sata_dwc_error_handler,
	.hardreset		= sata_dwc_hardreset,

	.qc_prep		= sata_dwc_qc_prep,
	.qc_issue		= sata_dwc_qc_issue,

	.scr_read		= sata_dwc_scr_read,
	.scr_write		= sata_dwc_scr_write,

	.port_start		= sata_dwc_port_start,
	.port_stop		= sata_dwc_port_stop,

	.bmdma_setup		= sata_dwc_bmdma_setup,
	.bmdma_start		= sata_dwc_bmdma_start,
};

static const struct ata_port_info sata_dwc_port_info[] = {
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &sata_dwc_ops,
	},
};

static int sata_dwc_probe(struct platform_device *ofdev)
{
	struct sata_dwc_device *hsdev;
	u32 idr, versionr;
	char *ver = (char *)&versionr;
	u8 __iomem *base;
	int err = 0;
	int irq;
	struct ata_host *host;
	struct ata_port_info pi = sata_dwc_port_info[0];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct device_node *np = ofdev->dev.of_node;

	/* Allocate DWC SATA device */
	host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
	hsdev = devm_kzalloc(&ofdev->dev, sizeof(*hsdev), GFP_KERNEL);
	if (!host || !hsdev)
		return -ENOMEM;

	/*
	 * The embedded AHB DMA controller descriptor must be allocated
	 * before its irq/regs fields are filled in below.
	 */
	hsdev->dma = devm_kzalloc(&ofdev->dev, sizeof(*hsdev->dma), GFP_KERNEL);
	if (!hsdev->dma)
		return -ENOMEM;

	host->private_data = hsdev;

	/* Ioremap SATA registers */
	base = of_iomap(np, 0);
	if (!base) {
		dev_err(&ofdev->dev,
			"ioremap failed for SATA register address\n");
		return -ENODEV;
	}
	hsdev->reg_base = base;
	dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");

	/* Synopsys DWC SATA specific Registers */
	hsdev->sata_dwc_regs = (void __iomem *)(base + SATA_DWC_REG_OFFSET);

	/* Setup port */
	host->ports[0]->ioaddr.cmd_addr = base;
	host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
	host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;
	sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);

	/* Read the ID and Version Registers */
	idr = in_le32(&hsdev->sata_dwc_regs->idr);
	versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
	dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
		   idr, ver[0], ver[1], ver[2]);

	/* Get SATA DMA interrupt number */
	hsdev->dma->irq = irq_of_parse_and_map(np, 1);
	if (hsdev->dma->irq == NO_IRQ) {
		dev_err(&ofdev->dev, "no SATA DMA irq\n");
		err = -ENODEV;
		goto error_iomap;
	}

	/* Get physical SATA DMA register base address */
	hsdev->dma->regs = of_iomap(np, 1);
	if (!hsdev->dma->regs) {
		dev_err(&ofdev->dev,
			"ioremap failed for AHBDMA register address\n");
		err = -ENODEV;
		goto error_iomap;
	}

	/* Save dev for later use in dev_xxx() routines */
	hsdev->dev = &ofdev->dev;

	hsdev->dma->dev = &ofdev->dev;

	/* Initialize AHB DMAC */
	err = dw_dma_probe(hsdev->dma, NULL);
	if (err)
		goto error_dma_iomap;

	/* Enable SATA Interrupts */
	sata_dwc_enable_interrupts(hsdev);

	/* Get SATA interrupt number */
	irq = irq_of_parse_and_map(np, 0);
	if (irq == NO_IRQ) {
		dev_err(&ofdev->dev, "no SATA irq\n");
		err = -ENODEV;
		goto error_out;
	}

	/*
	 * Now, register with libATA core, this will also initiate the
	 * device discovery process, invoking our port_start() handler &
	 * error_handler() to execute a dummy Softreset EH session
	 */
	err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
	if (err) {
		dev_err(&ofdev->dev, "failed to activate host\n");
		goto error_out;
	}

	dev_set_drvdata(&ofdev->dev, host);
	return 0;

error_out:
	/* Free SATA DMA resources */
	dw_dma_remove(hsdev->dma);
error_dma_iomap:
	iounmap(hsdev->dma->regs);
error_iomap:
	iounmap(base);
	return err;
}

static int sata_dwc_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);
	struct sata_dwc_device *hsdev = host->private_data;

	ata_host_detach(host);

	/* Free SATA DMA resources */
	dw_dma_remove(hsdev->dma);

	iounmap(hsdev->dma->regs);
	iounmap(hsdev->reg_base);
	dev_dbg(&ofdev->dev, "done\n");
	return 0;
}

static const struct of_device_id sata_dwc_match[] = {
	{ .compatible = "amcc,sata-460ex", },
	{}
};
MODULE_DEVICE_TABLE(of, sata_dwc_match);

static struct platform_driver sata_dwc_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = sata_dwc_match,
	},
	.probe = sata_dwc_probe,
	.remove = sata_dwc_remove,
};

module_platform_driver(sata_dwc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
MODULE_VERSION(DRV_VERSION);

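/*
 * Example device-tree node (a sketch only; the unit address, reg values
 * and interrupt specifiers are illustrative, not taken from a real
 * board file). Resource/IRQ index 0 must be the SATA core and index 1
 * the embedded AHB DMA controller, matching sata_dwc_probe() above:
 *
 *	sata@bffd1000 {
 *		compatible = "amcc,sata-460ex";
 *		reg = <0xbffd1000 0x800>, <0xbffd0800 0x400>;
 *		interrupts = <26 4>, <25 4>;
 *	};
 */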