/*
    comedi/drivers/mite.c
    Hardware driver for NI Mite PCI interface chip

    COMEDI - Linux Control and Measurement Device Interface
    Copyright (C) 1997-2002 David A. Schleef <ds@schleef.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*/

/*
    The PCI-MIO E series driver was originally written by
    Tomasz Motylewski <...>, and ported to comedi by ds.

    References for specifications:

	321747b.pdf  Register Level Programmer Manual (obsolete)
	321747c.pdf  Register Level Programmer Manual (new)
	DAQ-STC reference manual

    Other possibly relevant info:

	320517c.pdf  User manual (obsolete)
	320517f.pdf  User manual (new)
	320889a.pdf  delete
	320906c.pdf  maximum signal ratings
	321066a.pdf  about 16x
	321791a.pdf  discontinuation of at-mio-16e-10 rev. c
	321808a.pdf  about at-mio-16e-10 rev P
	321837a.pdf  discontinuation of at-mio-16de-10 rev d
	321838a.pdf  about at-mio-16de-10 rev N

    ISSUES:

*/

/* #define USE_KMALLOC */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include "../comedi_pci.h"

#include "mite.h"

#define TOP_OF_PAGE(x) ((x)|(~(PAGE_MASK)))

struct mite_struct *mite_alloc(struct pci_dev *pcidev)
{
	struct mite_struct *mite;
	unsigned int i;

	mite = kzalloc(sizeof(*mite), GFP_KERNEL);
	if (mite) {
		spin_lock_init(&mite->lock);
		mite->pcidev = pcidev;
		for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
			mite->channels[i].mite = mite;
			mite->channels[i].channel = i;
			mite->channels[i].done = 1;
		}
	}
	return mite;
}
EXPORT_SYMBOL_GPL(mite_alloc);

static void dump_chip_signature(u32 csigr_bits)
{
	pr_info("version = %i, type = %i, mite mode = %i, interface mode = %i\n",
		mite_csigr_version(csigr_bits), mite_csigr_type(csigr_bits),
		mite_csigr_mmode(csigr_bits), mite_csigr_imode(csigr_bits));
	pr_info("num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
		mite_csigr_dmac(csigr_bits), mite_csigr_wpdep(csigr_bits),
		mite_csigr_wins(csigr_bits), mite_csigr_iowins(csigr_bits));
}

static unsigned mite_fifo_size(struct mite_struct *mite, unsigned channel)
{
	unsigned fcr_bits = readl(mite->mite_io_addr + MITE_FCR(channel));
	unsigned empty_count = (fcr_bits >> 16) & 0xff;
	unsigned full_count = fcr_bits & 0xff;

	return empty_count + full_count;
}

int mite_setup2(struct comedi_device *dev,
		struct mite_struct *mite, bool use_win1)
{
	unsigned long length;
	int i;
	u32 csigr_bits;
	unsigned unknown_dma_burst_bits;

	pci_set_master(mite->pcidev);

	mite->mite_io_addr = pci_ioremap_bar(mite->pcidev, 0);
	if (!mite->mite_io_addr) {
		dev_err(dev->class_dev,
			"Failed to remap mite io memory address\n");
		return -ENOMEM;
	}
	mite->mite_phys_addr = pci_resource_start(mite->pcidev, 0);

	dev->mmio = pci_ioremap_bar(mite->pcidev, 1);
	if (!dev->mmio) {
		dev_err(dev->class_dev,
			"Failed to remap daq io memory address\n");
		return -ENOMEM;
	}
	mite->daq_phys_addr = pci_resource_start(mite->pcidev, 1);
	length = pci_resource_len(mite->pcidev, 1);

	if (use_win1) {
		writel(0, mite->mite_io_addr + MITE_IODWBSR);
		dev_info(dev->class_dev,
			 "using I/O Window Base Size register 1\n");
		writel(mite->daq_phys_addr | WENAB |
		       MITE_IODWBSR_1_WSIZE_bits(length),
		       mite->mite_io_addr + MITE_IODWBSR_1);
		writel(0, mite->mite_io_addr + MITE_IODWCR_1);
	} else {
		writel(mite->daq_phys_addr | WENAB,
		       mite->mite_io_addr + MITE_IODWBSR);
	}
	/*
	 * make sure dma bursts work. I got this from running a bus analyzer
	 * on a pxi-6281 and a pxi-6713. 6713 powered up with register value
	 * of 0x61f and bursts worked. 6281 powered up with register value of
	 * 0x1f and bursts didn't work. The NI windows driver reads the
	 * register, then does a bitwise-or of 0x600 with it and writes it back.
	 */
	unknown_dma_burst_bits =
		readl(mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
	unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
	writel(unknown_dma_burst_bits,
	       mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);

	csigr_bits = readl(mite->mite_io_addr + MITE_CSIGR);
	mite->num_channels = mite_csigr_dmac(csigr_bits);
	if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
		dev_warn(dev->class_dev,
			 "mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
			 mite->num_channels, MAX_MITE_DMA_CHANNELS);
		mite->num_channels = MAX_MITE_DMA_CHANNELS;
	}
	dump_chip_signature(csigr_bits);
	for (i = 0; i < mite->num_channels; i++) {
		writel(CHOR_DMARESET, mite->mite_io_addr + MITE_CHOR(i));
		/* disable interrupts */
		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
		       CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
		       mite->mite_io_addr + MITE_CHCR(i));
	}
	mite->fifo_size = mite_fifo_size(mite, 0);
	dev_info(dev->class_dev, "fifo size is %i.\n", mite->fifo_size);
	return 0;
}
EXPORT_SYMBOL_GPL(mite_setup2);

void mite_detach(struct mite_struct *mite)
{
	if (!mite)
		return;

	if (mite->mite_io_addr)
		iounmap(mite->mite_io_addr);

	kfree(mite);
}
EXPORT_SYMBOL_GPL(mite_detach);
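
/*
 * Typical lifecycle, shown here only as a minimal sketch: a board driver
 * using this module allocates and initializes the interface chip at
 * auto-attach time and tears it down in its detach handler.  The devpriv
 * field names below are hypothetical, not part of this API.
 *
 *	devpriv->mite = mite_alloc(pcidev);
 *	if (!devpriv->mite)
 *		return -ENOMEM;
 *	ret = mite_setup2(dev, devpriv->mite, false);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	mite_detach(devpriv->mite);	/-* in the driver's detach routine *-/
 */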

struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite)
{
	struct mite_dma_descriptor_ring *ring =
		kmalloc(sizeof(struct mite_dma_descriptor_ring), GFP_KERNEL);

	if (!ring)
		return NULL;
	ring->hw_dev = get_device(&mite->pcidev->dev);
	if (!ring->hw_dev) {
		kfree(ring);
		return NULL;
	}
	ring->n_links = 0;
	ring->descriptors = NULL;
	ring->descriptors_dma_addr = 0;
	return ring;
}
EXPORT_SYMBOL_GPL(mite_alloc_ring);

void mite_free_ring(struct mite_dma_descriptor_ring *ring)
{
	if (ring) {
		if (ring->descriptors) {
			dma_free_coherent(ring->hw_dev,
					  ring->n_links *
					  sizeof(struct mite_dma_descriptor),
					  ring->descriptors,
					  ring->descriptors_dma_addr);
		}
		put_device(ring->hw_dev);
		kfree(ring);
	}
}
EXPORT_SYMBOL_GPL(mite_free_ring);

struct mite_channel *
mite_request_channel_in_range(struct mite_struct *mite,
			      struct mite_dma_descriptor_ring *ring,
			      unsigned min_channel, unsigned max_channel)
{
	int i;
	unsigned long flags;
	struct mite_channel *channel = NULL;

	/*
	 * spin lock so mite_release_channel can be called safely
	 * from interrupts
	 */
	spin_lock_irqsave(&mite->lock, flags);
	for (i = min_channel; i <= max_channel; ++i) {
		if (mite->channel_allocated[i] == 0) {
			mite->channel_allocated[i] = 1;
			channel = &mite->channels[i];
			channel->ring = ring;
			break;
		}
	}
	spin_unlock_irqrestore(&mite->lock, flags);
	return channel;
}
EXPORT_SYMBOL_GPL(mite_request_channel_in_range);
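
/*
 * A minimal allocation sketch (devpriv->ai_ring is a hypothetical
 * driver-private field): the descriptor ring is created once per
 * subdevice, and a channel is claimed from the requested range when a
 * command starts.  Drivers that do not care which channel they get
 * typically pass the full 0..num_channels-1 range.
 *
 *	devpriv->ai_ring = mite_alloc_ring(devpriv->mite);
 *	if (!devpriv->ai_ring)
 *		return -ENOMEM;
 *	...
 *	mite_chan = mite_request_channel_in_range(devpriv->mite,
 *						  devpriv->ai_ring, 0, 1);
 *	if (!mite_chan)
 *		return -EBUSY;
 */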

void mite_release_channel(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;
	unsigned long flags;

	/* spin lock to prevent races with mite_request_channel */
	spin_lock_irqsave(&mite->lock, flags);
	if (mite->channel_allocated[mite_chan->channel]) {
		mite_dma_disarm(mite_chan);
		mite_dma_reset(mite_chan);
		/*
		 * disable all channel's interrupts (do it after disarm/reset so
		 * MITE_CHCR reg isn't changed while dma is still active!)
		 */
		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
		       CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
		       CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
		       mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
		mite->channel_allocated[mite_chan->channel] = 0;
		mite_chan->ring = NULL;
		mmiowb();
	}
	spin_unlock_irqrestore(&mite->lock, flags);
}
EXPORT_SYMBOL_GPL(mite_release_channel);

void mite_dma_arm(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;
	int chor;
	unsigned long flags;

	/*
	 * memory barrier is intended to ensure any twiddling with the buffer
	 * is done before writing to the mite to arm dma transfer
	 */
	smp_mb();
	/* arm */
	chor = CHOR_START;
	spin_lock_irqsave(&mite->lock, flags);
	mite_chan->done = 0;
	writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
	mmiowb();
	spin_unlock_irqrestore(&mite->lock, flags);
	/* mite_dma_tcr(mite, channel); */
}
EXPORT_SYMBOL_GPL(mite_dma_arm);

/**************************************/

int mite_buf_change(struct mite_dma_descriptor_ring *ring,
		    struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int n_links;
	int i;

	if (ring->descriptors) {
		dma_free_coherent(ring->hw_dev,
				  ring->n_links *
				  sizeof(struct mite_dma_descriptor),
				  ring->descriptors,
				  ring->descriptors_dma_addr);
	}
	ring->descriptors = NULL;
	ring->descriptors_dma_addr = 0;
	ring->n_links = 0;

	if (async->prealloc_bufsz == 0)
		return 0;

	n_links = async->prealloc_bufsz >> PAGE_SHIFT;

	ring->descriptors =
		dma_alloc_coherent(ring->hw_dev,
				   n_links * sizeof(struct mite_dma_descriptor),
				   &ring->descriptors_dma_addr, GFP_KERNEL);
	if (!ring->descriptors) {
		dev_err(s->device->class_dev,
			"mite: ring buffer allocation failed\n");
		return -ENOMEM;
	}
	ring->n_links = n_links;

	for (i = 0; i < n_links; i++) {
		ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
		ring->descriptors[i].addr =
			cpu_to_le32(async->buf_map->page_list[i].dma_addr);
		ring->descriptors[i].next =
			cpu_to_le32(ring->descriptors_dma_addr +
				    (i + 1) * sizeof(struct mite_dma_descriptor));
	}
	ring->descriptors[n_links - 1].next =
		cpu_to_le32(ring->descriptors_dma_addr);
	/*
	 * barrier is meant to ensure that all the writes to the dma
	 * descriptors have completed before the dma controller is
	 * commanded to read them
	 */
	smp_wmb();
	return 0;
}
EXPORT_SYMBOL_GPL(mite_buf_change);
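
/*
 * mite_buf_change() rebuilds the descriptor ring whenever the size of the
 * preallocated comedi buffer changes, so a board driver normally calls it
 * from its subdevice (*buf_change) hook.  A minimal sketch, assuming the
 * hook takes (dev, s) as in recent comedi trees; board_ai_buf_change and
 * devpriv->ai_ring are hypothetical names:
 *
 *	static int board_ai_buf_change(struct comedi_device *dev,
 *				       struct comedi_subdevice *s)
 *	{
 *		struct board_private *devpriv = dev->private;
 *
 *		return mite_buf_change(devpriv->ai_ring, s);
 *	}
 */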

void mite_prep_dma(struct mite_channel *mite_chan,
		   unsigned int num_device_bits, unsigned int num_memory_bits)
{
	unsigned int chor, chcr, mcr, dcr, lkcr;
	struct mite_struct *mite = mite_chan->mite;

	/* reset DMA and FIFO */
	chor = CHOR_DMARESET | CHOR_FRESET;
	writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));

	/* short link chaining mode */
	chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE |
	       CHCR_BURSTEN;
	/*
	 * Link Complete Interrupt: interrupt every time a link
	 * in MITE_RING is completed. This can generate a lot of
	 * extra interrupts, but right now we update the values
	 * of buf_int_ptr and buf_int_count at each interrupt. A
	 * better method is to poll the MITE before each user
	 * "read()" to calculate the number of bytes available.
	 */
	chcr |= CHCR_SET_LC_IE;
	if (num_memory_bits == 32 && num_device_bits == 16) {
		/*
		 * Doing a combined 32 and 16 bit byteswap gets the 16 bit
		 * samples into the fifo in the right order. Tested doing 32 bit
		 * memory to 16 bit device transfers to the analog out of a
		 * pxi-6281, which has mite version = 1, type = 4. This also
		 * works for dma reads from the counters on e-series boards.
		 */
		chcr |= CHCR_BYTE_SWAP_DEVICE | CHCR_BYTE_SWAP_MEMORY;
	}
	if (mite_chan->dir == COMEDI_INPUT)
		chcr |= CHCR_DEV_TO_MEM;

	writel(chcr, mite->mite_io_addr + MITE_CHCR(mite_chan->channel));

	/* to/from memory */
	mcr = CR_RL(64) | CR_ASEQUP;
	switch (num_memory_bits) {
	case 8:
		mcr |= CR_PSIZE8;
		break;
	case 16:
		mcr |= CR_PSIZE16;
		break;
	case 32:
		mcr |= CR_PSIZE32;
		break;
	default:
		pr_warn("bug! invalid mem bit width for dma transfer\n");
		break;
	}
	writel(mcr, mite->mite_io_addr + MITE_MCR(mite_chan->channel));

	/* from/to device */
	dcr = CR_RL(64) | CR_ASEQUP;
	dcr |= CR_PORTIO | CR_AMDEVICE | CR_REQSDRQ(mite_chan->channel);
	switch (num_device_bits) {
	case 8:
		dcr |= CR_PSIZE8;
		break;
	case 16:
		dcr |= CR_PSIZE16;
		break;
	case 32:
		dcr |= CR_PSIZE32;
		break;
	default:
		pr_warn("bug! invalid dev bit width for dma transfer\n");
		break;
	}
	writel(dcr, mite->mite_io_addr + MITE_DCR(mite_chan->channel));

	/* reset the DAR */
	writel(0, mite->mite_io_addr + MITE_DAR(mite_chan->channel));

	/* the link is 32bits */
	lkcr = CR_RL(64) | CR_ASEQUP | CR_PSIZE32;
	writel(lkcr, mite->mite_io_addr + MITE_LKCR(mite_chan->channel));

	/* starting address for link chaining */
	writel(mite_chan->ring->descriptors_dma_addr,
	       mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_prep_dma);
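
/*
 * A typical command-start sequence, as a sketch only (devpriv->ai_mite_chan
 * is a hypothetical driver-private field; board-specific register setup is
 * omitted): the transfer direction is set before programming the geometry,
 * and the channel is armed last.  The argument order of mite_prep_dma() is
 * (channel, device bits, memory bits), e.g. 16-bit device samples into
 * 16-bit host samples.
 *
 *	devpriv->ai_mite_chan->dir = COMEDI_INPUT;
 *	mite_prep_dma(devpriv->ai_mite_chan, 16, 16);
 *	mite_dma_arm(devpriv->ai_mite_chan);
 */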

static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;

	return readl(mite->mite_io_addr + MITE_DAR(mite_chan->channel));
}

u32 mite_bytes_in_transit(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;

	return readl(mite->mite_io_addr +
		     MITE_FCR(mite_chan->channel)) & 0x000000FF;
}
EXPORT_SYMBOL_GPL(mite_bytes_in_transit);

/* returns lower bound for number of bytes transferred from device to memory */
u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
{
	u32 device_byte_count;

	device_byte_count = mite_device_bytes_transferred(mite_chan);
	return device_byte_count - mite_bytes_in_transit(mite_chan);
}
EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_lb);

/* returns upper bound for number of bytes transferred from device to memory */
u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
{
	u32 in_transit_count;

	in_transit_count = mite_bytes_in_transit(mite_chan);
	return mite_device_bytes_transferred(mite_chan) - in_transit_count;
}
EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_ub);

/* returns lower bound for number of bytes read from memory to device */
u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
{
	u32 device_byte_count;

	device_byte_count = mite_device_bytes_transferred(mite_chan);
	return device_byte_count + mite_bytes_in_transit(mite_chan);
}
EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_lb);

/* returns upper bound for number of bytes read from memory to device */
u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
{
	u32 in_transit_count;

	in_transit_count = mite_bytes_in_transit(mite_chan);
	return mite_device_bytes_transferred(mite_chan) + in_transit_count;
}
EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_ub);

unsigned mite_dma_tcr(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;

	return readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_dma_tcr);

void mite_dma_disarm(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;
	unsigned chor;

	/* disarm */
	chor = CHOR_ABORT;
	writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_dma_disarm);

int mite_sync_input_dma(struct mite_channel *mite_chan,
			struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	int count;
	unsigned int nbytes, old_alloc_count;

	old_alloc_count = async->buf_write_alloc_count;
	/* write alloc as much as we can */
	comedi_buf_write_alloc(s, async->prealloc_bufsz);

	nbytes = mite_bytes_written_to_memory_lb(mite_chan);
	if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
		  old_alloc_count) > 0) {
		dev_warn(s->device->class_dev,
			 "mite: DMA overwrite of free area\n");
		async->events |= COMEDI_CB_OVERFLOW;
		return -1;
	}

	count = nbytes - async->buf_write_count;
	/*
	 * it's possible count will be negative due to conservative value
	 * returned by mite_bytes_written_to_memory_lb
	 */
	if (count <= 0)
		return 0;

	comedi_buf_write_free(s, count);
	comedi_inc_scan_progress(s, count);
	async->events |= COMEDI_CB_BLOCK;
	return 0;
}
EXPORT_SYMBOL_GPL(mite_sync_input_dma);
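
/*
 * The sync helpers are normally driven from the board driver's interrupt
 * handler: read and acknowledge the channel status, push newly DMA'd data
 * into the comedi buffer, then let the comedi core dispatch the events.
 * A minimal sketch (board_handle_ai_dma and devpriv->ai_mite_chan are
 * hypothetical names):
 *
 *	static void board_handle_ai_dma(struct comedi_device *dev,
 *					struct comedi_subdevice *s)
 *	{
 *		struct board_private *devpriv = dev->private;
 *		unsigned int status = mite_get_status(devpriv->ai_mite_chan);
 *
 *		if (status & CHSR_LINKC)
 *			mite_sync_input_dma(devpriv->ai_mite_chan, s);
 *		comedi_handle_events(dev, s);
 *	}
 */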

int mite_sync_output_dma(struct mite_channel *mite_chan,
			 struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
	unsigned int old_alloc_count = async->buf_read_alloc_count;
	u32 nbytes_ub, nbytes_lb;
	int count;

	/* read alloc as much as we can */
	comedi_buf_read_alloc(s, async->prealloc_bufsz);
	nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan);
	if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0)
		nbytes_lb = stop_count;
	nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
	if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
		nbytes_ub = stop_count;
	if ((int)(nbytes_ub - old_alloc_count) > 0) {
		dev_warn(s->device->class_dev, "mite: DMA underrun\n");
		async->events |= COMEDI_CB_OVERFLOW;
		return -1;
	}
	count = nbytes_lb - async->buf_read_count;
	if (count <= 0)
		return 0;

	if (count) {
		comedi_buf_read_free(s, count);
		async->events |= COMEDI_CB_BLOCK;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mite_sync_output_dma);

unsigned mite_get_status(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;
	unsigned status;
	unsigned long flags;

	spin_lock_irqsave(&mite->lock, flags);
	status = readl(mite->mite_io_addr + MITE_CHSR(mite_chan->channel));
	if (status & CHSR_DONE) {
		mite_chan->done = 1;
		writel(CHOR_CLRDONE,
		       mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
	}
	mmiowb();
	spin_unlock_irqrestore(&mite->lock, flags);
	return status;
}
EXPORT_SYMBOL_GPL(mite_get_status);

int mite_done(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;
	unsigned long flags;
	int done;

	mite_get_status(mite_chan);
	spin_lock_irqsave(&mite->lock, flags);
	done = mite_chan->done;
	spin_unlock_irqrestore(&mite->lock, flags);
	return done;
}
EXPORT_SYMBOL_GPL(mite_done);

static int __init mite_module_init(void)
{
	return 0;
}

static void __exit mite_module_exit(void)
{
}

module_init(mite_module_init);
module_exit(mite_module_exit);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");