/*
 * Generic PXA PATA driver
 *
 * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma/pxa-dma.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <scsi/scsi_host.h>

#include <linux/platform_data/ata-pxa.h>

#define DRV_NAME	"pata_pxa"
#define DRV_VERSION	"0.1"

struct pata_pxa_data {
	struct dma_chan		*dma_chan;
	dma_cookie_t		dma_cookie;
	struct completion	dma_done;
};

/*
 * DMA interrupt handler.
 */
static void pxa_ata_dma_irq(void *d)
{
	struct pata_pxa_data *pd = d;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
	if (status == DMA_ERROR || status == DMA_COMPLETE)
		complete(&pd->dma_done);
}

/*
 * Prepare the DMA descriptors for submission.
 */
static void pxa_qc_prep(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;
	struct dma_async_tx_descriptor *tx;
	enum dma_transfer_direction dir;

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
	tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
		return;
	}
	tx->callback = pxa_ata_dma_irq;
	tx->callback_param = pd;
	pd->dma_cookie = dmaengine_submit(tx);
}

/*
 * Configure the DMA controller, load the DMA descriptors, but don't start the
 * DMA controller yet. Only issue the ATA command.
 */
static void pxa_bmdma_setup(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
}

/*
 * Execute the DMA transfer.
 */
static void pxa_bmdma_start(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;

	init_completion(&pd->dma_done);
	dma_async_issue_pending(pd->dma_chan);
}

/*
 * Wait until the DMA transfer completes, then stop the DMA controller.
 */
static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
	if (status != DMA_ERROR && status != DMA_COMPLETE &&
	    !wait_for_completion_timeout(&pd->dma_done, HZ))
		ata_dev_err(qc->dev, "Timeout waiting for DMA completion!\n");

	dmaengine_terminate_all(pd->dma_chan);
}
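/*
 * Note: wait_for_completion_timeout() returns 0 on timeout and the number
 * of jiffies left otherwise, hence the negated test above when reporting
 * a stuck transfer.
 */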
/*
 * Read DMA status. The bmdma_stop() will take care of properly finishing
 * the DMA transfer, so we always have a DMA-complete interrupt here.
 */
static unsigned char pxa_bmdma_status(struct ata_port *ap)
{
	struct pata_pxa_data *pd = ap->private_data;
	unsigned char ret = ATA_DMA_INTR;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, &state);
	if (status != DMA_COMPLETE)
		ret |= ATA_DMA_ERR;

	return ret;
}

/*
 * No IRQ register present, so we do nothing.
 */
static void pxa_irq_clear(struct ata_port *ap)
{
}

/*
 * Check for ATAPI DMA. ATAPI DMA is unsupported by this driver. It's still
 * unclear why ATAPI has DMA issues.
 */
static int pxa_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return -EOPNOTSUPP;
}

static struct scsi_host_template pxa_ata_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations pxa_ata_port_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.cable_detect		= ata_cable_40wire,

	.bmdma_setup		= pxa_bmdma_setup,
	.bmdma_start		= pxa_bmdma_start,
	.bmdma_stop		= pxa_bmdma_stop,
	.bmdma_status		= pxa_bmdma_status,

	.check_atapi_dma	= pxa_check_atapi_dma,

	.sff_irq_clear		= pxa_irq_clear,

	.qc_prep		= pxa_qc_prep,
};

static int pxa_ata_probe(struct platform_device *pdev)
{
	struct ata_host *host;
	struct ata_port *ap;
	struct pata_pxa_data *data;
	struct resource *cmd_res;
	struct resource *ctl_res;
	struct resource *dma_res;
	struct resource *irq_res;
	struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct dma_slave_config config;
	dma_cap_mask_t mask;
	struct pxad_param param;
	int ret = 0;

	/*
	 * Resource validation, four resources are needed:
	 *  - CMD port base address
	 *  - CTL port base address
	 *  - DMA port base address
	 *  - IRQ pin
	 */
	if (pdev->num_resources != 4) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * CMD port base address
	 */
	cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(cmd_res == NULL))
		return -EINVAL;

	/*
	 * CTL port base address
	 */
	ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (unlikely(ctl_res == NULL))
		return -EINVAL;

	/*
	 * DMA port base address
	 */
	dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (unlikely(dma_res == NULL))
		return -EINVAL;

	/*
	 * IRQ pin
	 */
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(irq_res == NULL))
		return -EINVAL;

	/*
	 * Allocate the host
	 */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		return -ENOMEM;

	ap = host->ports[0];
	ap->ops = &pxa_ata_port_ops;
	ap->pio_mask = ATA_PIO4;
	ap->mwdma_mask = ATA_MWDMA2;

	ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
					   resource_size(cmd_res));
	ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
					   resource_size(ctl_res));
	ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start,
					     resource_size(dma_res));

	/*
	 * Adjust register offsets
	 */
	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
	ap->ioaddr.data_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_DATA << pdata->reg_shift);
	ap->ioaddr.error_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_ERR << pdata->reg_shift);
	ap->ioaddr.feature_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_FEATURE << pdata->reg_shift);
	ap->ioaddr.nsect_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_NSECT << pdata->reg_shift);
	ap->ioaddr.lbal_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_LBAL << pdata->reg_shift);
	ap->ioaddr.lbam_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_LBAM << pdata->reg_shift);
	ap->ioaddr.lbah_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_LBAH << pdata->reg_shift);
	ap->ioaddr.device_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_DEVICE << pdata->reg_shift);
	ap->ioaddr.status_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_STATUS << pdata->reg_shift);
	ap->ioaddr.command_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_CMD << pdata->reg_shift);
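	/*
	 * Each taskfile register above sits at the legacy ATA offset scaled
	 * by pdata->reg_shift, which typically reflects how the IDE address
	 * lines are wired on the board (e.g. reg_shift = 1 places
	 * consecutive registers two bytes apart).
	 */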
	/*
	 * Allocate and load driver's internal data structure
	 */
	data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ap->private_data = data;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;
	param.drcmr = pdata->dma_dreq;
	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.src_addr = dma_res->start;
	config.dst_addr = dma_res->start;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;

	/*
	 * Request the DMA channel
	 */
	data->dma_chan =
		dma_request_slave_channel_compat(mask, pxad_filter_fn,
						 &param, &pdev->dev, "data");
	if (!data->dma_chan)
		return -EBUSY;
	ret = dmaengine_slave_config(data->dma_chan, &config);
	if (ret < 0) {
		dev_err(&pdev->dev, "dma configuration failed: %d\n", ret);
		return ret;
	}

	/*
	 * Activate the ATA host
	 */
	ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
				pdata->irq_flags, &pxa_ata_sht);
	if (ret)
		dma_release_channel(data->dma_chan);

	return ret;
}

static int pxa_ata_remove(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	struct pata_pxa_data *data = host->ports[0]->private_data;

	dma_release_channel(data->dma_chan);

	ata_host_detach(host);

	return 0;
}

static struct platform_driver pxa_ata_driver = {
	.probe		= pxa_ata_probe,
	.remove		= pxa_ata_remove,
	.driver		= {
		.name	= DRV_NAME,
	},
};

module_platform_driver(pxa_ata_driver);

MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
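/*
 * Usage sketch (hypothetical board code, not part of this driver): a machine
 * file registers a "pata_pxa" platform device with two MEM resources (the
 * CMD and CTL bases), one DMA resource and one IRQ resource, plus platform
 * data such as:
 *
 *	static struct pata_pxa_pdata board_pata_pdata = {
 *		.reg_shift	= 1,
 *		.dma_dreq	= 1,
 *		.irq_flags	= IRQF_TRIGGER_RISING,
 *	};
 *
 * The field values here are illustrative only; they depend entirely on how
 * the CF/IDE socket is wired on a given board.
 */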