/*
 * Core driver for the High Speed UART DMA
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *
 * Partially based on the bits found in drivers/tty/serial/mfd.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA channel allocation:
 * 1. Even number chans are used for DMA Read (UART TX), odd chans for DMA
 *    Write (UART RX).
 * 2. 0/1 channels are assigned to port 0, 2/3 chans to port 1, 4/5 chans to
 *    port 2, and so on.
 */
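/*
 * Put differently, UART port n owns the channel pair (2n, 2n + 1): the even
 * channel carries TX data (memory to device) and the odd channel carries RX
 * data (device to memory). This is the mapping hsu_dma_probe() applies when
 * it derives hsuc->direction from the channel index.
 */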
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hsu.h"

#define HSU_DMA_BUSWIDTHS				\
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	|	\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_3_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)

static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
{
	hsu_chan_writel(hsuc, HSU_CH_CR, 0);
}

static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
{
	u32 cr = HSU_CH_CR_CHA;

	if (hsuc->direction == DMA_MEM_TO_DEV)
		cr &= ~HSU_CH_CR_CHD;
	else if (hsuc->direction == DMA_DEV_TO_MEM)
		cr |= HSU_CH_CR_CHD;

	hsu_chan_writel(hsuc, HSU_CH_CR, cr);
}

static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
{
	struct dma_slave_config *config = &hsuc->config;
	struct hsu_dma_desc *desc = hsuc->desc;
	u32 bsr = 0, mtsr = 0;	/* to shut the compiler up */
	u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
	unsigned int i, count;

	if (hsuc->direction == DMA_MEM_TO_DEV) {
		bsr = config->dst_maxburst;
		mtsr = config->dst_addr_width;
	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
		bsr = config->src_maxburst;
		mtsr = config->src_addr_width;
	}

	hsu_chan_disable(hsuc);

	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
	hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
	hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);

	/* Set descriptors, at most HSU_DMA_CHAN_NR_DESC at once */
	count = desc->nents - desc->active;
	if (count > HSU_DMA_CHAN_NR_DESC)
		count = HSU_DMA_CHAN_NR_DESC;

	for (i = 0; i < count; i++) {
		hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
		hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);

		/* Prepare value for DCR */
		dcr |= HSU_CH_DCR_DESCA(i);
		dcr |= HSU_CH_DCR_CHTOI(i);	/* timeout bit, see HSU Errata 1 */

		desc->active++;
	}
	/* Only for the last descriptor in the chain */
	dcr |= HSU_CH_DCR_CHSOD(count - 1);
	dcr |= HSU_CH_DCR_CHDI(count - 1);

	hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);

	hsu_chan_enable(hsuc);
}

static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
{
	hsu_chan_disable(hsuc);
	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
}

static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
{
	hsu_dma_chan_start(hsuc);
}

static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
{
	struct virt_dma_desc *vdesc;

	/* Get the next descriptor */
	vdesc = vchan_next_desc(&hsuc->vchan);
	if (!vdesc) {
		hsuc->desc = NULL;
		return;
	}

	list_del(&vdesc->node);
	hsuc->desc = to_hsu_dma_desc(vdesc);

	/* Start the channel with a new descriptor */
	hsu_dma_start_channel(hsuc);
}

static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
{
	unsigned long flags;
	u32 sr;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
}
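/**
 * hsu_dma_irq() - handle an interrupt for one HSU DMA channel
 * @chip: HSU DMA chip the channel belongs to
 * @nr: number of the channel that raised the interrupt
 *
 * Reads (and thereby clears) the channel status register, handles the
 * descriptor timeout and error conditions, and either programs the next
 * batch of descriptors or completes the current one and starts the next
 * queued transfer.
 *
 * Return: IRQ_HANDLED if the interrupt was meant for this channel,
 * IRQ_NONE otherwise.
 */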
irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
{
	struct hsu_dma_chan *hsuc;
	struct hsu_dma_desc *desc;
	unsigned long flags;
	u32 sr;

	/* Sanity check */
	if (nr >= chip->hsu->nr_channels)
		return IRQ_NONE;

	hsuc = &chip->hsu->chan[nr];

	/*
	 * In any case the IRQ status must be read to be cleared.
	 * There is a bug, see Errata 5, HSD 2900918.
	 */
	sr = hsu_dma_chan_get_sr(hsuc);
	if (!sr)
		return IRQ_NONE;

	/* On a timeout IRQ we need to wait a bit, see Errata 2 */
	if (hsuc->direction == DMA_DEV_TO_MEM && (sr & HSU_CH_SR_DESCTO_ANY))
		udelay(2);

	sr &= ~HSU_CH_SR_DESCTO_ANY;
	if (!sr)
		return IRQ_HANDLED;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	desc = hsuc->desc;
	if (desc) {
		if (sr & HSU_CH_SR_CHE) {
			desc->status = DMA_ERROR;
		} else if (desc->active < desc->nents) {
			hsu_dma_start_channel(hsuc);
		} else {
			vchan_cookie_complete(&desc->vdesc);
			desc->status = DMA_COMPLETE;
			hsu_dma_start_transfer(hsuc);
		}
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hsu_dma_irq);

static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
{
	struct hsu_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT);
	if (!desc->sg) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);

	kfree(desc->sg);
	kfree(desc);
}

static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct hsu_dma_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = hsu_dma_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		desc->sg[i].addr = sg_dma_address(sg);
		desc->sg[i].len = sg_dma_len(sg);
	}

	desc->nents = sg_len;
	desc->direction = direction;
	/* desc->active = 0 by kzalloc */
	desc->status = DMA_IN_PROGRESS;

	return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
}

static void hsu_dma_issue_pending(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
		hsu_dma_start_transfer(hsuc);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
}

static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc)
{
	size_t bytes = 0;
	unsigned int i;

	for (i = desc->active; i < desc->nents; i++)
		bytes += desc->sg[i].len;

	return bytes;
}
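/*
 * The residue of the descriptor currently in flight is the number of bytes
 * still pending in the not-yet-programmed sg entries (hsu_dma_desc_size())
 * plus whatever is left in the descriptor registers that have already been
 * programmed, read back from HSU_CH_DxTSR.
 */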
static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
	struct hsu_dma_desc *desc = hsuc->desc;
	size_t bytes = hsu_dma_desc_size(desc);
	int i;

	i = desc->active % HSU_DMA_CHAN_NR_DESC;
	do {
		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
	} while (--i >= 0);

	return bytes;
}

static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	size_t bytes;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	vdesc = vchan_find_desc(&hsuc->vchan, cookie);
	if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
		bytes = hsu_dma_active_desc_size(hsuc);
		dma_set_residue(state, bytes);
		status = hsuc->desc->status;
	} else if (vdesc) {
		bytes = hsu_dma_desc_size(to_hsu_dma_desc(vdesc));
		dma_set_residue(state, bytes);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return status;
}

static int hsu_dma_slave_config(struct dma_chan *chan,
				struct dma_slave_config *config)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(config->direction))
		return -EINVAL;

	memcpy(&hsuc->config, config, sizeof(hsuc->config));

	return 0;
}

static int hsu_dma_pause(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
		hsu_chan_disable(hsuc);
		hsuc->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}

static int hsu_dma_resume(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
		hsuc->desc->status = DMA_IN_PROGRESS;
		hsu_chan_enable(hsuc);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}

static int hsu_dma_terminate_all(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&hsuc->vchan.lock, flags);

	hsu_dma_stop_channel(hsuc);
	if (hsuc->desc) {
		hsu_dma_desc_free(&hsuc->desc->vdesc);
		hsuc->desc = NULL;
	}

	vchan_get_all_descriptors(&hsuc->vchan, &head);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
	vchan_dma_desc_free_list(&hsuc->vchan, &head);

	return 0;
}

static void hsu_dma_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}

int hsu_dma_probe(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu;
	void __iomem *addr = chip->regs + chip->offset;
	unsigned short i;
	int ret;

	hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
	if (!hsu)
		return -ENOMEM;

	chip->hsu = hsu;

	/* Calculate nr_channels from the IO space length */
	hsu->nr_channels = (chip->length - chip->offset) / HSU_DMA_CHAN_LENGTH;

	hsu->chan = devm_kcalloc(chip->dev, hsu->nr_channels,
				 sizeof(*hsu->chan), GFP_KERNEL);
	if (!hsu->chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&hsu->dma.channels);
	for (i = 0; i < hsu->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		hsuc->vchan.desc_free = hsu_dma_desc_free;
		vchan_init(&hsuc->vchan, &hsu->dma);

		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
	}

	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);

	hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;

	hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;

	hsu->dma.device_issue_pending = hsu_dma_issue_pending;
	hsu->dma.device_tx_status = hsu_dma_tx_status;

	hsu->dma.device_config = hsu_dma_slave_config;
	hsu->dma.device_pause = hsu_dma_pause;
	hsu->dma.device_resume = hsu_dma_resume;
	hsu->dma.device_terminate_all = hsu_dma_terminate_all;

	hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	hsu->dma.dev = chip->dev;

	ret = dma_async_device_register(&hsu->dma);
	if (ret)
		return ret;

	dev_info(chip->dev, "Found HSU DMA, %d channels\n", hsu->nr_channels);
	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_probe);

int hsu_dma_remove(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu = chip->hsu;
	unsigned short i;

	dma_async_device_unregister(&hsu->dma);

	for (i = 0; i < hsu->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		tasklet_kill(&hsuc->vchan.task);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("High Speed UART DMA core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");