/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/*************************************\
* DMA and interrupt masking functions *
\*************************************/

/**
 * DOC: DMA and interrupt masking functions
 *
 * Here we set up the descriptor pointers (RXDP/TXDP), start/stop the DMA
 * engine and handle queue setup for the 5210 chipset (the rest are handled
 * in qcu.c). We also set up the interrupt mask register (IMR) and read the
 * various interrupt status registers (ISR).
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ath5k.h"
#include "reg.h"
#include "debug.h"


/*********\
* Receive *
\*********/

/**
 * ath5k_hw_start_rx_dma() - Start DMA receive
 * @ah: The &struct ath5k_hw
 */
void
ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
	ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
	ath5k_hw_reg_read(ah, AR5K_CR);
}

/**
 * ath5k_hw_stop_rx_dma() - Stop DMA receive
 * @ah: The &struct ath5k_hw
 */
static int
ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
	unsigned int i;

	ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);

	/*
	 * It may take some time to disable the DMA receive unit
	 */
	for (i = 1000; i > 0 &&
			(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
			i--)
		udelay(100);

	if (!i)
		ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				"failed to stop RX DMA !\n");

	return i ? 0 : -EBUSY;
}

/**
 * ath5k_hw_get_rxdp() - Get RX Descriptor's address
 * @ah: The &struct ath5k_hw
 */
u32
ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
	return ath5k_hw_reg_read(ah, AR5K_RXDP);
}

/**
 * ath5k_hw_set_rxdp() - Set RX Descriptor's address
 * @ah: The &struct ath5k_hw
 * @phys_addr: RX descriptor address
 *
 * Returns -EIO if rx is active
 */
int
ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
	if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
		ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				"tried to set RXDP while rx was active !\n");
		return -EIO;
	}

	ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
	return 0;
}
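
/*
 * Example (illustrative sketch): RXDP can only be changed while RX DMA
 * is stopped, since ath5k_hw_set_rxdp() above returns -EIO while RX is
 * active. A caller within this file could do something like the
 * following; "bf->daddr" stands for a hypothetical DMA address of the
 * first RX descriptor:
 *
 *	if (ath5k_hw_stop_rx_dma(ah) == 0 &&
 *	    ath5k_hw_set_rxdp(ah, bf->daddr) == 0)
 *		ath5k_hw_start_rx_dma(ah);
 */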

/**********\
* Transmit *
\**********/

/**
 * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Start DMA transmit for a specific queue. Since the 5210 doesn't have
 * a QCU/DCU, we also set up the queue parameters for it here, based on
 * queue type (one queue for normal data and one queue for beacons). For
 * queue setup on newer chips check out qcu.c. Returns -EINVAL if the
 * queue number is out of range or the queue is inactive, and -EIO if
 * the queue is disabled.
 *
 * NOTE: Must be called after setting up the tx control descriptor for
 * that queue (see below).
 */
int
ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	u32 tx_queue;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EINVAL;

	if (ah->ah_version == AR5K_AR5210) {
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/*
		 * Set the queue by type on 5210
		 */
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
			break;
		case AR5K_TX_QUEUE_BEACON:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
					AR5K_BSR);
			break;
		case AR5K_TX_QUEUE_CAB:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
				AR5K_BCR_BDMAE, AR5K_BSR);
			break;
		default:
			return -EINVAL;
		}
		/* Start queue */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);
	} else {
		/* Return if queue is disabled */
		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
			return -EIO;

		/* Start queue */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
	}

	return 0;
}
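
/*
 * Example (illustrative sketch): per the NOTE above, the queue's TXDP
 * must point at a valid tx control descriptor before the queue is
 * started; "bf->daddr" stands for a hypothetical descriptor address:
 *
 *	ret = ath5k_hw_set_txdp(ah, queue, bf->daddr);
 *	if (!ret)
 *		ret = ath5k_hw_start_tx_dma(ah, queue);
 */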

/**
 * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Stop DMA transmit on a specific hw queue and drain it so we don't
 * have any pending frames. Returns -EBUSY if we still have pending frames,
 * -EINVAL if the queue number is out of range or the queue is inactive.
 */
static int
ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	unsigned int i;
	u32 tx_queue, pending;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EINVAL;

	if (ah->ah_version == AR5K_AR5210) {
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/*
		 * Set by queue type
		 */
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			/* Set the disable bit for queue 1,
			 * mirroring the data queue case above */
			tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXE1;
			ath5k_hw_reg_write(ah, 0, AR5K_BSR);
			break;
		default:
			return -EINVAL;
		}

		/* Stop queue */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);
	} else {

		/*
		 * Enable DCU early termination to quickly
		 * flush any pending frames from QCU
		 */
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_DCU_EARLY);

		/*
		 * Schedule TX disable and wait until queue is empty
		 */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);

		/* Wait for queue to stop */
		for (i = 1000; i > 0 &&
		(AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
		i--)
			udelay(100);

		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
			ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				"queue %i didn't stop !\n", queue);

		/* Check for pending frames */
		i = 1000;
		do {
			pending = ath5k_hw_reg_read(ah,
				AR5K_QUEUE_STATUS(queue)) &
				AR5K_QCU_STS_FRMPENDCNT;
			udelay(100);
		} while (--i && pending);

		/* For 2413+ order PCU to drop packets using
		 * QUIET mechanism */
		if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
		    pending) {
			/* Set periodicity and duration
			 * (both in TU, 1 TU = 1024us) */
			ath5k_hw_reg_write(ah,
				AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)|
				AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
				AR5K_QUIET_CTL2);

			/* Enable quiet period for current TSF
			 * (TSF is in usec, shifting by 10 gives TU) */
			ath5k_hw_reg_write(ah,
				AR5K_QUIET_CTL1_QT_EN |
				AR5K_REG_SM(ath5k_hw_reg_read(ah,
						AR5K_TSF_L32_5211) >> 10,
						AR5K_QUIET_CTL1_NEXT_QT_TSF),
				AR5K_QUIET_CTL1);

			/* Force channel idle high */
			AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
					AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);

			/* Wait a while and disable mechanism */
			udelay(400);
			AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
						AR5K_QUIET_CTL1_QT_EN);

			/* Re-check for pending frames */
			i = 100;
			do {
				pending = ath5k_hw_reg_read(ah,
					AR5K_QUEUE_STATUS(queue)) &
					AR5K_QCU_STS_FRMPENDCNT;
				udelay(100);
			} while (--i && pending);

			AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
					AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);

			if (pending)
				ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
					"quiet mechanism didn't work q:%i !\n",
					queue);
		}

		/*
		 * Disable DCU early termination
		 */
		AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_DCU_EARLY);

		/* Clear register */
		ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
		if (pending) {
			ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
					"tx dma didn't stop (q:%i, frm:%i) !\n",
					queue, pending);
			return -EBUSY;
		}
	}

	/* TODO: Check for success on 5210 else return error */
	return 0;
}

/**
 * ath5k_hw_stop_beacon_queue() - Stop beacon queue
 * @ah: The &struct ath5k_hw
 * @queue: The queue number
 *
 * Returns -EIO if queue didn't stop
 */
int
ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
{
	int ret;
	ret = ath5k_hw_stop_tx_dma(ah, queue);
	if (ret) {
		ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				"beacon queue didn't stop !\n");
		return -EIO;
	}
	return 0;
}

/**
 * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Get TX descriptor's address for a specific queue. On the 5210 we ignore
 * the queue number and use the tx queue type instead, since we only have
 * 2 queues: TXDP0 for the normal data queue and TXDP1 for the beacon queue.
 * For newer chips with QCU/DCU we just read the corresponding TXDP register.
 *
 * XXX: Is TXDP read and clear ?
 */
u32
ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
	u16 tx_reg;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/*
	 * Get the transmit queue descriptor pointer from the selected queue
	 */
	/* 5210 doesn't have QCU */
	if (ah->ah_version == AR5K_AR5210) {
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_reg = AR5K_NOQCU_TXDP0;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			tx_reg = AR5K_NOQCU_TXDP1;
			break;
		default:
			return 0xffffffff;
		}
	} else {
		tx_reg = AR5K_QUEUE_TXDP(queue);
	}

	return ath5k_hw_reg_read(ah, tx_reg);
}

/**
 * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 * @phys_addr: The physical address
 *
 * Set TX descriptor's address for a specific queue. As above, on the 5210
 * we ignore the queue number and use the tx queue type, so TXDP0 is used
 * for the normal data queue and TXDP1 for the beacon queue. For newer chips
 * with QCU/DCU we just set the corresponding TXDP register.
 * Returns -EINVAL if the queue type is invalid on the 5210 and -EIO if the
 * queue is still active.
 */
int
ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
	u16 tx_reg;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/*
	 * Set the transmit queue descriptor pointer register by type
	 * on 5210
	 */
	if (ah->ah_version == AR5K_AR5210) {
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_reg = AR5K_NOQCU_TXDP0;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			tx_reg = AR5K_NOQCU_TXDP1;
			break;
		default:
			return -EINVAL;
		}
	} else {
		/*
		 * Set the transmit queue descriptor pointer for
		 * the selected queue on QCU for 5211+
		 * (this won't work if the queue is still active)
		 */
		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
			return -EIO;

		tx_reg = AR5K_QUEUE_TXDP(queue);
	}

	/* Set descriptor pointer */
	ath5k_hw_reg_write(ah, phys_addr, tx_reg);

	return 0;
}
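
/*
 * Example (illustrative sketch): combining the helpers above to point a
 * queue at a fresh descriptor chain. Since ath5k_hw_set_txdp() returns
 * -EIO while the queue is active, the queue is stopped first; "bhalq"
 * and "bf->daddr" are hypothetical names for the beacon queue number
 * and the new descriptor's DMA address:
 *
 *	if (ath5k_hw_stop_beacon_queue(ah, bhalq) == 0 &&
 *	    ath5k_hw_set_txdp(ah, bhalq, bf->daddr) == 0)
 *		ath5k_hw_start_tx_dma(ah, bhalq);
 */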

/**
 * ath5k_hw_update_tx_triglevel() - Update tx trigger level
 * @ah: The &struct ath5k_hw
 * @increase: Flag to force increase of trigger level
 *
 * This function increases/decreases the tx trigger level for the tx fifo
 * buffer (aka FIFO threshold) that is used to indicate when PCU flushes
 * the buffer and transmits its data. Lowering this results in sending small
 * frames more quickly but can lead to tx underruns; raising it a lot can
 * result in other problems. Right now we start with the lowest possible
 * value (64 bytes) and increase it using the increase flag whenever we get
 * a tx underrun. Returns -EIO if we have reached the maximum/minimum.
 *
 * XXX: Link this with tx DMA size ?
 * XXX2: Use it to save interrupts ?
 */
int
ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
	u32 trigger_level, imr;
	int ret = -EIO;

	/*
	 * Disable interrupts by setting the mask
	 */
	imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);

	trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
			AR5K_TXCFG_TXFULL);

	if (!increase) {
		if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
			goto done;
	} else
		trigger_level +=
			((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);

	/*
	 * Update trigger level on success
	 */
	if (ah->ah_version == AR5K_AR5210)
		ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
	else
		AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
				AR5K_TXCFG_TXFULL, trigger_level);

	ret = 0;

done:
	/*
	 * Restore interrupt mask
	 */
	ath5k_hw_set_imr(ah, imr);

	return ret;
}
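
/*
 * Example (illustrative sketch): a tx underrun reported through the
 * interrupt status is the usual reason to raise the trigger level;
 * "status" stands for a mask previously filled in by
 * ath5k_hw_get_isr() below:
 *
 *	if (status & AR5K_INT_TXURN)
 *		ath5k_hw_update_tx_triglevel(ah, true);
 */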


/*******************\
* Interrupt masking *
\*******************/

/**
 * ath5k_hw_is_intr_pending() - Check if we have pending interrupts
 * @ah: The &struct ath5k_hw
 *
 * Check if we have pending interrupts to process. Returns 1 if we
 * have pending interrupts and 0 if we haven't.
 */
bool
ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
	return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}

/**
 * ath5k_hw_get_isr() - Get interrupt status
 * @ah: The &struct ath5k_hw
 * @interrupt_mask: Driver's interrupt mask used to filter out
 * interrupts in sw.
 *
 * This function is used inside our interrupt handler to determine the reason
 * for the interrupt by reading the Primary Interrupt Status Register. Returns
 * an abstract interrupt status mask which is mostly ISR with some uncommon
 * bits being mapped on some standard non hw-specific positions
 * (check out &ath5k_int).
 *
 * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
 * function gets called are cleared on return.
 */
int
ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
	u32 data = 0;

	/*
	 * Read interrupt status from the Primary Interrupt
	 * Status Register.
	 *
	 * Note: PISR/SISR Not available on 5210
	 */
	if (ah->ah_version == AR5K_AR5210) {
		u32 isr = 0;
		isr = ath5k_hw_reg_read(ah, AR5K_ISR);
		if (unlikely(isr == AR5K_INT_NOCARD)) {
			*interrupt_mask = isr;
			return -ENODEV;
		}

		/*
		 * Filter out the non-common bits from the interrupt
		 * status.
		 */
		*interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;

		/* Handle INT_FATAL */
		if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
						| AR5K_ISR_DPERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

		/*
		 * XXX: BMISS interrupts may occur after association.
		 * I found this on 5210 code but it needs testing. If this is
		 * true we should disable them before assoc and re-enable them
		 * after a successful assoc + some jiffies.
		interrupt_mask &= ~AR5K_INT_BMISS;
		 */

		data = isr;
	} else {
		u32 pisr = 0;
		u32 pisr_clear = 0;
		u32 sisr0 = 0;
		u32 sisr1 = 0;
		u32 sisr2 = 0;
		u32 sisr3 = 0;
		u32 sisr4 = 0;

		/* Read PISR and SISRs... */
		pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
		if (unlikely(pisr == AR5K_INT_NOCARD)) {
			*interrupt_mask = pisr;
			return -ENODEV;
		}

		sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
		sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
		sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
		sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
		sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);

		/*
		 * PISR holds the logical OR of interrupt bits
		 * from the SISR registers:
		 *
		 * TXOK and TXDESC -> Logical OR of TXOK and TXDESC
		 *			per-queue bits on SISR0
		 *
		 * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
		 *			per-queue bits on SISR1
		 *
		 * TXURN -> Logical OR of TXURN per-queue bits on SISR2
		 *
		 * HIUERR -> Logical OR of MCABT, SSERR and DPERR bits on SISR2
		 *
		 * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC,
		 *		BCN_TIMEOUT, CAB_TIMEOUT and DTIM
		 *		(and TSFOOR ?) bits on SISR2
		 *
		 * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
		 *			QCBRURN per-queue bits on SISR3
		 *
		 * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
		 *
		 * If we clear these bits on PISR we'll also clear all
		 * related bits from the SISRs, e.g. if we write the TXOK bit
		 * on PISR we'll clear all TXOK bits from SISR0, so if a new
		 * TXOK interrupt got fired for another queue while we were
		 * reading the interrupt registers and we write back the TXOK
		 * bit on PISR we'll lose it. So make sure that we don't write
		 * back on PISR any bits that come from SISRs. Clearing them
		 * from SISRs will also clear PISR so no need to worry here.
		 */

		/* XXX: There seems to be an issue on some cards
		 * with tx interrupt flags not being updated
		 * on PISR despite that all Tx interrupt bits
		 * are cleared on SISRs. Since we handle all
		 * Tx queues all together it shouldn't be an
		 * issue if we clear Tx interrupt flags also
		 * on PISR to avoid that.
		 */
		pisr_clear = (pisr & ~AR5K_ISR_BITS_FROM_SISRS) |
					(pisr & AR5K_INT_TX_ALL);

		/*
		 * Write to clear them...
		 * Note: This means that each bit we write back
		 * to the registers will get cleared, leaving the
		 * rest unaffected. So this won't affect new interrupts
		 * we didn't catch while reading/processing, we'll get
		 * them next time get_isr gets called.
		 */
		ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
		ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
		ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
		ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
		ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
		ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
		/* Flush previous write */
		ath5k_hw_reg_read(ah, AR5K_PISR);

		/*
		 * Filter out the non-common bits from the interrupt
		 * status.
		 */
		*interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;


		/* We treat TXOK, TXDESC, TXERR and TXEOL
		 * the same way (schedule the tx tasklet)
		 * so we track them all together per queue */
		if (pisr & AR5K_ISR_TXOK)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
						AR5K_SISR0_QCU_TXOK);

		if (pisr & AR5K_ISR_TXDESC)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
						AR5K_SISR0_QCU_TXDESC);

		if (pisr & AR5K_ISR_TXERR)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
						AR5K_SISR1_QCU_TXERR);

		if (pisr & AR5K_ISR_TXEOL)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
						AR5K_SISR1_QCU_TXEOL);

		/* Currently this is not very useful since we treat
		 * all queues the same way if we get a TXURN (update
		 * tx trigger level) but we might need it later on */
		if (pisr & AR5K_ISR_TXURN)
			ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
						AR5K_SISR2_QCU_TXURN);

		/* Misc Beacon related interrupts */

		/* For AR5211 */
		if (pisr & AR5K_ISR_TIM)
			*interrupt_mask |= AR5K_INT_TIM;

		/* For AR5212+ */
		if (pisr & AR5K_ISR_BCNMISC) {
			if (sisr2 & AR5K_SISR2_TIM)
				*interrupt_mask |= AR5K_INT_TIM;
			if (sisr2 & AR5K_SISR2_DTIM)
				*interrupt_mask |= AR5K_INT_DTIM;
			if (sisr2 & AR5K_SISR2_DTIM_SYNC)
				*interrupt_mask |= AR5K_INT_DTIM_SYNC;
			if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
				*interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
			if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
				*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
		}

		/* Below interrupts are unlikely to happen */

		/* HIU = Host Interface Unit (PCI etc)
		 * Can be one of MCABT, SSERR, DPERR from SISR2 */
		if (unlikely(pisr & (AR5K_ISR_HIUERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

		/* Beacon Not Ready */
		if (unlikely(pisr & (AR5K_ISR_BNR)))
			*interrupt_mask |= AR5K_INT_BNR;

		/* A queue got CBR overrun */
		if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
			*interrupt_mask |= AR5K_INT_QCBRORN;
			ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
						AR5K_SISR3_QCBRORN);
		}

		/* A queue got CBR underrun */
		if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
			*interrupt_mask |= AR5K_INT_QCBRURN;
			ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
						AR5K_SISR3_QCBRURN);
		}

		/* A queue got triggered */
		if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
			*interrupt_mask |= AR5K_INT_QTRIG;
			ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
						AR5K_SISR4_QTRIG);
		}

		data = pisr;
	}

	/*
	 * In case we didn't handle anything,
	 * print the register value.
	 */
	if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
		ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);

	return 0;
}
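
/*
 * Example (illustrative sketch): an interrupt handler would typically
 * drain pending interrupts in a loop, acting on the abstract status
 * mask; everything except the two hw helpers is hypothetical:
 *
 *	enum ath5k_int status;
 *
 *	do {
 *		if (ath5k_hw_get_isr(ah, &status))
 *			break;	(card probably gone)
 *		if (status & AR5K_INT_FATAL)
 *			... schedule a reset ...
 *	} while (ath5k_hw_is_intr_pending(ah));
 */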

/**
 * ath5k_hw_set_imr() - Set interrupt mask
 * @ah: The &struct ath5k_hw
 * @new_mask: The new interrupt mask to be set
 *
 * Set the interrupt mask in hw to save interrupts. We do that by mapping
 * &ath5k_int bits to hw-specific bits to remove the abstraction and writing
 * the Interrupt Mask Register. Returns the old interrupt mask.
 */
enum ath5k_int
ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
	enum ath5k_int old_mask, int_mask;

	old_mask = ah->ah_imr;

	/*
	 * Disable card interrupts to prevent any race conditions
	 * (they will be re-enabled afterwards if AR5K_INT_GLOBAL
	 * is set again on the new mask).
	 */
	if (old_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);
	}

	/*
	 * Add additional, chipset-dependent interrupt mask flags
	 * and write them to the IMR (interrupt mask register).
	 */
	int_mask = new_mask & AR5K_INT_COMMON;

	if (ah->ah_version != AR5K_AR5210) {
		/* Preserve per queue TXURN interrupt mask */
		u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
				& AR5K_SIMR2_QCU_TXURN;

		/* Fatal interrupt abstraction for 5211+ */
		if (new_mask & AR5K_INT_FATAL) {
			int_mask |= AR5K_IMR_HIUERR;
			simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
				| AR5K_SIMR2_DPERR);
		}

		/* Misc beacon related interrupts */
		if (new_mask & AR5K_INT_TIM)
			int_mask |= AR5K_IMR_TIM;

		if (new_mask & AR5K_INT_TIM)
			simr2 |= AR5K_SISR2_TIM;
		if (new_mask & AR5K_INT_DTIM)
			simr2 |= AR5K_SISR2_DTIM;
		if (new_mask & AR5K_INT_DTIM_SYNC)
			simr2 |= AR5K_SISR2_DTIM_SYNC;
		if (new_mask & AR5K_INT_BCN_TIMEOUT)
			simr2 |= AR5K_SISR2_BCN_TIMEOUT;
		if (new_mask & AR5K_INT_CAB_TIMEOUT)
			simr2 |= AR5K_SISR2_CAB_TIMEOUT;

		/* Beacon Not Ready */
		if (new_mask & AR5K_INT_BNR)
			int_mask |= AR5K_INT_BNR;

		/* Note: Per queue interrupt masks
		 * are set via ath5k_hw_reset_tx_queue() (qcu.c) */
		ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
		ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);

	} else {
		/* Fatal interrupt abstraction for 5210 */
		if (new_mask & AR5K_INT_FATAL)
			int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
				| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);

		/* Only common interrupts left for 5210 (no SIMRs) */
		ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
	}

	/* If RXNOFRM interrupt is masked disable it
	 * by setting AR5K_RXNOFRM to zero */
	if (!(new_mask & AR5K_INT_RXNOFRM))
		ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);

	/* Store new interrupt mask */
	ah->ah_imr = new_mask;

	/* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
	if (new_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);
	}

	return old_mask;
}
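
/*
 * Example (illustrative sketch): since ath5k_hw_set_imr() returns the
 * old mask, it can be used to temporarily mask all interrupts around a
 * critical section, as ath5k_hw_update_tx_triglevel() does above:
 *
 *	enum ath5k_int old_mask;
 *
 *	old_mask = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
 *	... touch registers that must not race with the ISR ...
 *	ath5k_hw_set_imr(ah, old_mask);
 */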


/*********************\
* Init/Stop functions *
\*********************/

/**
 * ath5k_hw_dma_init() - Initialize DMA unit
 * @ah: The &struct ath5k_hw
 *
 * Set DMA size and pre-enable interrupts
 * (driver handles tx/rx buffer setup and
 * dma start/stop)
 *
 * XXX: Save/restore RXDP/TXDP registers ?
 */
void
ath5k_hw_dma_init(struct ath5k_hw *ah)
{
	/*
	 * Set Rx/Tx DMA Configuration
	 *
	 * Set standard DMA size (128). Note that
	 * a DMA size of 512 causes rx overruns and tx errors
	 * on pci-e cards (tested on 5424 but since rx overruns
	 * also occur on 5416/5418 with madwifi we set 128
	 * for all PCI-E cards to be safe).
	 *
	 * XXX: need to check 5210 for this
	 * TODO: Check out tx trigger level, it's always 64 on dumps but I
	 * guess we can tweak it and see how it goes ;-)
	 */
	if (ah->ah_version != AR5K_AR5210) {
		AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
			AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
		AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
			AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
	}

	/* Pre-enable interrupts on 5211/5212 */
	if (ah->ah_version != AR5K_AR5210)
		ath5k_hw_set_imr(ah, ah->ah_imr);

}

/**
 * ath5k_hw_dma_stop() - stop DMA unit
 * @ah: The &struct ath5k_hw
 *
 * Stop tx/rx DMA and interrupts. Returns
 * -EBUSY if tx or rx dma failed to stop.
 *
 * XXX: Sometimes the DMA unit hangs and we have
 * stuck frames on tx queues, only a reset
 * can fix that.
 */
int
ath5k_hw_dma_stop(struct ath5k_hw *ah)
{
	int i, qmax, err;

	/* Disable interrupts */
	ath5k_hw_set_imr(ah, 0);

	/* Stop rx dma */
	err = ath5k_hw_stop_rx_dma(ah);
	if (err)
		return err;

	/* Clear any pending interrupts
	 * and disable tx dma */
	if (ah->ah_version != AR5K_AR5210) {
		ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
		qmax = AR5K_NUM_TX_QUEUES;
	} else {
		/* PISR/SISR Not available on 5210 */
		ath5k_hw_reg_read(ah, AR5K_ISR);
		qmax = AR5K_NUM_TX_QUEUES_NOQCU;
	}

	for (i = 0; i < qmax; i++) {
		err = ath5k_hw_stop_tx_dma(ah, i);
		/* -EINVAL -> queue inactive */
		if (err && err != -EINVAL)
			return err;
	}

	return 0;
}
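
/*
 * Example (illustrative sketch): a reset path would typically bring the
 * DMA unit down before touching the hw and re-initialize it afterwards;
 * the surrounding reset logic is hypothetical:
 *
 *	ret = ath5k_hw_dma_stop(ah);
 *	if (ret)
 *		... fall back to a full chip reset ...
 *	... reset / reprogram hw ...
 *	ath5k_hw_dma_init(ah);
 */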