root/drivers/net/wireless/ath/ath9k/mac.c


DEFINITIONS

This source file includes the following definitions.
  1. ath9k_hw_set_txq_interrupts
  2. ath9k_hw_gettxbuf
  3. ath9k_hw_puttxbuf
  4. ath9k_hw_txstart
  5. ath9k_hw_numtxpending
  6. ath9k_hw_updatetxtriglevel
  7. ath9k_hw_abort_tx_dma
  8. ath9k_hw_stop_dma_queue
  9. ath9k_hw_set_txq_props
  10. ath9k_hw_get_txq_props
  11. ath9k_hw_setuptxqueue
  12. ath9k_hw_clear_queue_interrupts
  13. ath9k_hw_releasetxqueue
  14. ath9k_hw_resettxqueue
  15. ath9k_hw_rxprocdesc
  16. ath9k_hw_setrxabort
  17. ath9k_hw_putrxbuf
  18. ath9k_hw_startpcureceive
  19. ath9k_hw_abortpcurecv
  20. ath9k_hw_stopdmarecv
  21. ath9k_hw_beaconq_setup
  22. ath9k_hw_intrpend
  23. ath9k_hw_kill_interrupts
  24. ath9k_hw_disable_interrupts
  25. __ath9k_hw_enable_interrupts
  26. ath9k_hw_resume_interrupts
  27. ath9k_hw_enable_interrupts
  28. ath9k_hw_set_interrupts
  29. ath9k_hw_set_tx_filter

/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"
#include <linux/export.h>

static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
                                        struct ath9k_tx_queue_info *qi)
{
        ath_dbg(ath9k_hw_common(ah), INTERRUPT,
                "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
                ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
                ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
                ah->txurn_interrupt_mask);

        ENABLE_REGWRITE_BUFFER(ah);

        REG_WRITE(ah, AR_IMR_S0,
                  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
                  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
        REG_WRITE(ah, AR_IMR_S1,
                  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
                  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

        ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
        ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
        REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

        REGWRITE_BUFFER_FLUSH(ah);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
        return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
        REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
        ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q);
        REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
        u32 npend;

        npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
        if (npend == 0) {

                if (REG_READ(ah, AR_Q_TXE) & (1 << q))
                        npend = 1;
        }

        return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);

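/*
 * Editor's note (not part of the driver): ath9k_hw_numtxpending() above
 * deliberately reports 1 when the pending-frame count reads zero but the
 * queue's TXE bit is still set. The pending count can momentarily hit
 * zero while a frame is still in flight, so TXE clearing is the real
 * "queue drained" signal.
 */
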
/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether the frame trigger level should be increased
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first).
 *
 * Care must be taken to set the frame trigger level based
 * on the DMA request size. For example, if the DMA request size is set to
 * 128 bytes, the trigger level cannot exceed 6 * 64 = 384. This is because
 * there needs to be enough space in the TX FIFO for the requested transfer
 * size, so the TX FIFO will stop filling at 512 - 128 = 384 bytes. If we
 * set the threshold to a value beyond 6, the transmit will hang.
 *
 * Current dual-stream devices have a PCU TX FIFO size of 8 KB.
 * Current single-stream devices have a PCU TX FIFO size of 4 KB; however,
 * there is a hardware issue which forces us to use 2 KB instead, so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
        u32 txcfg, curLevel, newLevel;

        if (ah->tx_trig_level >= ah->config.max_txtrig_level)
                return false;

        ath9k_hw_disable_interrupts(ah);

        txcfg = REG_READ(ah, AR_TXCFG);
        curLevel = MS(txcfg, AR_FTRIG);
        newLevel = curLevel;
        if (bIncTrigLevel) {
                if (curLevel < ah->config.max_txtrig_level)
                        newLevel++;
        } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
                newLevel--;
        if (newLevel != curLevel)
                REG_WRITE(ah, AR_TXCFG,
                          (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

        ath9k_hw_enable_interrupts(ah);

        ah->tx_trig_level = newLevel;

        return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);

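/*
 * Editor's illustrative sketch (not part of the driver): the arithmetic
 * behind the kernel-doc above. The highest safe trigger level leaves room
 * in the FIFO for one full DMA burst; with a 512-byte window and 128-byte
 * requests this gives (512 - 128) / 64 = 6. The helper name is
 * hypothetical.
 */
static inline u32 __maybe_unused ath9k_max_safe_trig_level(u32 fifo_bytes,
                                                           u32 dma_req_bytes)
{
        /* The trigger level is expressed in units of 64 bytes */
        return (fifo_bytes - dma_req_bytes) / 64;
}
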
void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
{
        int maxdelay = 1000;
        int i, q;

        if (ah->curchan) {
                if (IS_CHAN_HALF_RATE(ah->curchan))
                        maxdelay *= 2;
                else if (IS_CHAN_QUARTER_RATE(ah->curchan))
                        maxdelay *= 4;
        }

        REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);

        REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
        REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
        REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

        for (q = 0; q < AR_NUM_QCU; q++) {
                for (i = 0; i < maxdelay; i++) {
                        if (i)
                                udelay(5);

                        if (!ath9k_hw_numtxpending(ah, q))
                                break;
                }
        }

        REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
        REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
        REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

        REG_WRITE(ah, AR_Q_TXD, 0);
}
EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);

bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT       1000    /* usec */
#define ATH9K_TIME_QUANTUM              100     /* usec */
        int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
        int wait;

        REG_WRITE(ah, AR_Q_TXD, 1 << q);

        for (wait = wait_time; wait != 0; wait--) {
                if (wait != wait_time)
                        udelay(ATH9K_TIME_QUANTUM);

                if (ath9k_hw_numtxpending(ah, q) == 0)
                        break;
        }

        REG_WRITE(ah, AR_Q_TXD, 0);

        return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);

bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
                            const struct ath9k_tx_queue_info *qinfo)
{
        u32 cw;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info *qi;

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_dbg(common, QUEUE,
                        "Set TXQ properties, inactive queue: %u\n", q);
                return false;
        }

        ath_dbg(common, QUEUE, "Set queue properties for: %u\n", q);

        qi->tqi_ver = qinfo->tqi_ver;
        qi->tqi_subtype = qinfo->tqi_subtype;
        qi->tqi_qflags = qinfo->tqi_qflags;
        qi->tqi_priority = qinfo->tqi_priority;
        if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
                qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
        else
                qi->tqi_aifs = INIT_AIFS;
        if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
                cw = min(qinfo->tqi_cwmin, 1024U);
                qi->tqi_cwmin = 1;
                while (qi->tqi_cwmin < cw)
                        qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
        } else
                qi->tqi_cwmin = qinfo->tqi_cwmin;
        if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
                cw = min(qinfo->tqi_cwmax, 1024U);
                qi->tqi_cwmax = 1;
                while (qi->tqi_cwmax < cw)
                        qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
        } else
                qi->tqi_cwmax = INIT_CWMAX;

        if (qinfo->tqi_shretry != 0)
                qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
        else
                qi->tqi_shretry = INIT_SH_RETRY;
        if (qinfo->tqi_lgretry != 0)
                qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
        else
                qi->tqi_lgretry = INIT_LG_RETRY;
        qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
        qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
        qi->tqi_burstTime = qinfo->tqi_burstTime;
        qi->tqi_readyTime = qinfo->tqi_readyTime;

        switch (qinfo->tqi_subtype) {
        case ATH9K_WME_UPSD:
                if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
                        qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
                break;
        default:
                break;
        }

        return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);

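/*
 * Editor's illustrative sketch (not part of the driver): the cwmin/cwmax
 * loops above round a requested contention window up to the next value of
 * the form 2^n - 1, which is what the hardware expects. The standalone
 * helper below (hypothetical name) performs the same rounding, e.g.
 * 20 -> 31 and 31 -> 31.
 */
static inline u32 __maybe_unused ath9k_cw_round_up(u32 cw)
{
        u32 v = 1;

        while (v < cw)
                v = (v << 1) | 1;       /* 1, 3, 7, 15, 31, ... */
        return v;
}
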
bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
                            struct ath9k_tx_queue_info *qinfo)
{
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info *qi;

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_dbg(common, QUEUE,
                        "Get TXQ properties, inactive queue: %u\n", q);
                return false;
        }

        qinfo->tqi_ver = qi->tqi_ver;
        qinfo->tqi_subtype = qi->tqi_subtype;
        qinfo->tqi_qflags = qi->tqi_qflags;
        qinfo->tqi_priority = qi->tqi_priority;
        qinfo->tqi_aifs = qi->tqi_aifs;
        qinfo->tqi_cwmin = qi->tqi_cwmin;
        qinfo->tqi_cwmax = qi->tqi_cwmax;
        qinfo->tqi_shretry = qi->tqi_shretry;
        qinfo->tqi_lgretry = qi->tqi_lgretry;
        qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
        qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
        qinfo->tqi_burstTime = qi->tqi_burstTime;
        qinfo->tqi_readyTime = qi->tqi_readyTime;

        return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);

int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
                          const struct ath9k_tx_queue_info *qinfo)
{
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info *qi;
        int q;

        switch (type) {
        case ATH9K_TX_QUEUE_BEACON:
                q = ATH9K_NUM_TX_QUEUES - 1;
                break;
        case ATH9K_TX_QUEUE_CAB:
                q = ATH9K_NUM_TX_QUEUES - 2;
                break;
        case ATH9K_TX_QUEUE_PSPOLL:
                q = 1;
                break;
        case ATH9K_TX_QUEUE_UAPSD:
                q = ATH9K_NUM_TX_QUEUES - 3;
                break;
        case ATH9K_TX_QUEUE_DATA:
                q = qinfo->tqi_subtype;
                break;
        default:
                ath_err(common, "Invalid TX queue type: %u\n", type);
                return -1;
        }

        ath_dbg(common, QUEUE, "Setup TX queue: %u\n", q);

        qi = &ah->txq[q];
        if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
                ath_err(common, "TX queue: %u already active\n", q);
                return -1;
        }
        memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
        qi->tqi_type = type;
        qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
        (void) ath9k_hw_set_txq_props(ah, q, qinfo);

        return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);

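/*
 * Editor's illustrative sketch (not part of the driver): typical use of
 * ath9k_hw_setuptxqueue() for a data queue, analogous to what
 * ath9k_hw_beaconq_setup() below does for the beacon queue. The function
 * name and the chosen values are examples only.
 */
static int __maybe_unused ath9k_example_dataq_setup(struct ath_hw *ah)
{
        struct ath9k_tx_queue_info qi;

        memset(&qi, 0, sizeof(qi));
        qi.tqi_subtype = 0;                     /* selects hardware queue 0 */
        qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;     /* resolved to INIT_AIFS */
        qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;    /* resolved at queue reset */
        qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;    /* resolved to INIT_CWMAX */
        qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;  /* enable TXOK/TXERR interrupts */

        /* Returns the hardware queue number, or -1 on error */
        return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_DATA, &qi);
}
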
static void ath9k_hw_clear_queue_interrupts(struct ath_hw *ah, u32 q)
{
        ah->txok_interrupt_mask &= ~(1 << q);
        ah->txerr_interrupt_mask &= ~(1 << q);
        ah->txdesc_interrupt_mask &= ~(1 << q);
        ah->txeol_interrupt_mask &= ~(1 << q);
        ah->txurn_interrupt_mask &= ~(1 << q);
}

bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info *qi;

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_dbg(common, QUEUE, "Release TXQ, inactive queue: %u\n", q);
                return false;
        }

        ath_dbg(common, QUEUE, "Release TX queue: %u\n", q);

        qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
        ath9k_hw_clear_queue_interrupts(ah, q);
        ath9k_hw_set_txq_interrupts(ah, qi);

        return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);

bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info *qi;
        u32 cwMin, chanCwMin, value;

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q);
                return true;
        }

        ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);

        if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
                chanCwMin = INIT_CWMIN;

                for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
        } else
                cwMin = qi->tqi_cwmin;

        ENABLE_REGWRITE_BUFFER(ah);

        REG_WRITE(ah, AR_DLCL_IFS(q),
                  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
                  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
                  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

        REG_WRITE(ah, AR_DRETRY_LIMIT(q),
                  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
                  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
                  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

        REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);

        if (AR_SREV_9340(ah) && !AR_SREV_9340_13_OR_LATER(ah))
                REG_WRITE(ah, AR_DMISC(q),
                          AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
        else
                REG_WRITE(ah, AR_DMISC(q),
                          AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

        if (qi->tqi_cbrPeriod) {
                REG_WRITE(ah, AR_QCBRCFG(q),
                          SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
                          SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
                REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
                            (qi->tqi_cbrOverflowLimit ?
                             AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
        }
        if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
                REG_WRITE(ah, AR_QRDYTIMECFG(q),
                          SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
                          AR_Q_RDYTIMECFG_EN);
        }

        REG_WRITE(ah, AR_DCHNTIME(q),
                  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
                  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

        if (qi->tqi_burstTime
            && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
                REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);

        if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
                REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);

        REGWRITE_BUFFER_FLUSH(ah);

        if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
                REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);

        switch (qi->tqi_type) {
        case ATH9K_TX_QUEUE_BEACON:
                ENABLE_REGWRITE_BUFFER(ah);

                REG_SET_BIT(ah, AR_QMISC(q),
                            AR_Q_MISC_FSP_DBA_GATED
                            | AR_Q_MISC_BEACON_USE
                            | AR_Q_MISC_CBR_INCR_DIS1);

                REG_SET_BIT(ah, AR_DMISC(q),
                            (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
                             AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
                            | AR_D_MISC_BEACON_USE
                            | AR_D_MISC_POST_FR_BKOFF_DIS);

                REGWRITE_BUFFER_FLUSH(ah);

                /*
                 * cwmin and cwmax should be 0 for the beacon queue,
                 * but not for IBSS, as that would create an imbalance
                 * in beaconing fairness among participating nodes.
                 */
                if (AR_SREV_9300_20_OR_LATER(ah) &&
                    ah->opmode != NL80211_IFTYPE_ADHOC) {
                        REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
                                  | SM(0, AR_D_LCL_IFS_CWMAX)
                                  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
                }
                break;
        case ATH9K_TX_QUEUE_CAB:
                ENABLE_REGWRITE_BUFFER(ah);

                REG_SET_BIT(ah, AR_QMISC(q),
                            AR_Q_MISC_FSP_DBA_GATED
                            | AR_Q_MISC_CBR_INCR_DIS1
                            | AR_Q_MISC_CBR_INCR_DIS0);
                value = (qi->tqi_readyTime -
                         (ah->config.sw_beacon_response_time -
                          ah->config.dma_beacon_response_time)) * 1024;
                REG_WRITE(ah, AR_QRDYTIMECFG(q),
                          value | AR_Q_RDYTIMECFG_EN);
                REG_SET_BIT(ah, AR_DMISC(q),
                            (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
                             AR_D_MISC_ARB_LOCKOUT_CNTRL_S));

                REGWRITE_BUFFER_FLUSH(ah);

                break;
        case ATH9K_TX_QUEUE_PSPOLL:
                REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
                break;
        case ATH9K_TX_QUEUE_UAPSD:
                REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
                break;
        default:
                break;
        }

        if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
                REG_SET_BIT(ah, AR_DMISC(q),
                            SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
                               AR_D_MISC_ARB_LOCKOUT_CNTRL) |
                            AR_D_MISC_POST_FR_BKOFF_DIS);
        }

        if (AR_SREV_9300_20_OR_LATER(ah))
                REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

        ath9k_hw_clear_queue_interrupts(ah, q);
        if (qi->tqi_qflags & TXQ_FLAG_TXINT_ENABLE) {
                ah->txok_interrupt_mask |= 1 << q;
                ah->txerr_interrupt_mask |= 1 << q;
        }
        if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
                ah->txdesc_interrupt_mask |= 1 << q;
        if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
                ah->txeol_interrupt_mask |= 1 << q;
        if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
                ah->txurn_interrupt_mask |= 1 << q;
        ath9k_hw_set_txq_interrupts(ah, qi);

        return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);

int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
                        struct ath_rx_status *rs)
{
        struct ar5416_desc ads;
        struct ar5416_desc *adsp = AR5416DESC(ds);
        u32 phyerr;

        if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
                return -EINPROGRESS;

        ads.u.rx = adsp->u.rx;

        rs->rs_status = 0;
        rs->rs_flags = 0;
        rs->enc_flags = 0;
        rs->bw = RATE_INFO_BW_20;

        rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
        rs->rs_tstamp = ads.AR_RcvTimestamp;

        if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
                rs->rs_rssi = ATH9K_RSSI_BAD;
                rs->rs_rssi_ctl[0] = ATH9K_RSSI_BAD;
                rs->rs_rssi_ctl[1] = ATH9K_RSSI_BAD;
                rs->rs_rssi_ctl[2] = ATH9K_RSSI_BAD;
                rs->rs_rssi_ext[0] = ATH9K_RSSI_BAD;
                rs->rs_rssi_ext[1] = ATH9K_RSSI_BAD;
                rs->rs_rssi_ext[2] = ATH9K_RSSI_BAD;
        } else {
                rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
                rs->rs_rssi_ctl[0] = MS(ads.ds_rxstatus0,
                                                AR_RxRSSIAnt00);
                rs->rs_rssi_ctl[1] = MS(ads.ds_rxstatus0,
                                                AR_RxRSSIAnt01);
                rs->rs_rssi_ctl[2] = MS(ads.ds_rxstatus0,
                                                AR_RxRSSIAnt02);
                rs->rs_rssi_ext[0] = MS(ads.ds_rxstatus4,
                                                AR_RxRSSIAnt10);
                rs->rs_rssi_ext[1] = MS(ads.ds_rxstatus4,
                                                AR_RxRSSIAnt11);
                rs->rs_rssi_ext[2] = MS(ads.ds_rxstatus4,
                                                AR_RxRSSIAnt12);
        }
        if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
                rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
        else
                rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

        rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
        rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

        rs->rs_firstaggr = (ads.ds_rxstatus8 & AR_RxFirstAggr) ? 1 : 0;
        rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
        rs->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
        rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);

        /* directly mapped flags for ieee80211_rx_status */
        rs->enc_flags |=
                (ads.ds_rxstatus3 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0;
        rs->bw = (ads.ds_rxstatus3 & AR_2040) ? RATE_INFO_BW_40 :
                                                RATE_INFO_BW_20;
        if (AR_SREV_9280_20_OR_LATER(ah))
                rs->enc_flags |=
                        (ads.ds_rxstatus3 & AR_STBC) ?
                                /* only Nss=1 STBC is supported */
                                (1 << RX_ENC_FLAG_STBC_SHIFT) : 0;

        if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
                rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
        if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
                rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
        if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
                rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

        if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
                /*
                 * Treat these errors as mutually exclusive to avoid spurious
                 * extra error reports from the hardware. If a CRC error is
                 * reported, then decryption and MIC errors are irrelevant;
                 * the frame is going to be dropped either way.
                 */
                if (ads.ds_rxstatus8 & AR_PHYErr) {
                        rs->rs_status |= ATH9K_RXERR_PHY;
                        phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
                        rs->rs_phyerr = phyerr;
                } else if (ads.ds_rxstatus8 & AR_CRCErr)
                        rs->rs_status |= ATH9K_RXERR_CRC;
                else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
                        rs->rs_status |= ATH9K_RXERR_DECRYPT;
                else if (ads.ds_rxstatus8 & AR_MichaelErr)
                        rs->rs_status |= ATH9K_RXERR_MIC;
        } else {
                if (ads.ds_rxstatus8 &
                    (AR_CRCErr | AR_PHYErr | AR_DecryptCRCErr | AR_MichaelErr))
                        rs->rs_status |= ATH9K_RXERR_CORRUPT_DESC;

                /* Only rates up to MCS15 are valid; 0x90 (MCS16) and
                 * above indicate a corrupt descriptor */
                if (rs->rs_rate >= 0x90)
                        rs->rs_status |= ATH9K_RXERR_CORRUPT_DESC;
        }

        if (ads.ds_rxstatus8 & AR_KeyMiss)
                rs->rs_status |= ATH9K_RXERR_KEYMISS;

        return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);

/*
 * This stops or re-enables RX.
 *
 * If 'set' is true, this kills any frame currently being transferred
 * between the MAC and baseband and prevents any new frames from getting
 * started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
        u32 reg;

        if (set) {
                REG_SET_BIT(ah, AR_DIAG_SW,
                            (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

                if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
                                   0, AH_WAIT_TIMEOUT)) {
                        REG_CLR_BIT(ah, AR_DIAG_SW,
                                    (AR_DIAG_RX_DIS |
                                     AR_DIAG_RX_ABORT));

                        reg = REG_READ(ah, AR_OBS_BUS_1);
                        ath_err(ath9k_hw_common(ah),
                                "RX failed to go idle in 10 ms RXSM=0x%x\n",
                                reg);

                        return false;
                }
        } else {
                REG_CLR_BIT(ah, AR_DIAG_SW,
                            (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
        }

        return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);

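/*
 * Editor's illustrative sketch (not part of the driver): a typical RX
 * teardown sequence using the helpers in this file, aborting the PCU and
 * waiting for the state machine to idle before halting RX DMA. The
 * function name is hypothetical and error handling is abbreviated.
 */
static void __maybe_unused ath9k_example_rx_stop(struct ath_hw *ah)
{
        bool reset = false;

        /* Abort RX and wait for the RX state machine to go idle */
        if (!ath9k_hw_setrxabort(ah, true))
                ath_err(ath9k_hw_common(ah), "RX state machine stuck\n");

        /* Then halt the RX DMA engine */
        if (!ath9k_hw_stopdmarecv(ah, &reset))
                ath_err(ath9k_hw_common(ah), "RX DMA did not stop\n");
}
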
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
        REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
        ath9k_enable_mib_counters(ah);

        ath9k_ani_reset(ah, is_scanning);

        REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
        REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);

        ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);

bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
        struct ath_common *common = ath9k_hw_common(ah);
        u32 mac_status, last_mac_status = 0;
        int i;

        /* Enable access to the DMA observation bus */
        REG_WRITE(ah, AR_MACMISC,
                  ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
                   (AR_MACMISC_MISC_OBS_BUS_1 <<
                    AR_MACMISC_MISC_OBS_BUS_MSB_S)));

        REG_WRITE(ah, AR_CR, AR_CR_RXD);

        /* Wait for rx enable bit to go low */
        for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
                if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
                        break;

                if (!AR_SREV_9300_20_OR_LATER(ah)) {
                        mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
                        if (mac_status == 0x1c0 && mac_status == last_mac_status) {
                                *reset = true;
                                break;
                        }

                        last_mac_status = mac_status;
                }

                udelay(AH_TIME_QUANTUM);
        }

        if (i == 0) {
                ath_err(common,
                        "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
                        AH_RX_STOP_DMA_TIMEOUT / 1000,
                        REG_READ(ah, AR_CR),
                        REG_READ(ah, AR_DIAG_SW),
                        REG_READ(ah, AR_DMADBG_7));
                return false;
        } else {
                return true;
        }

#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);

int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
        struct ath9k_tx_queue_info qi;

        memset(&qi, 0, sizeof(qi));
        qi.tqi_aifs = 1;
        qi.tqi_cwmin = 0;
        qi.tqi_cwmax = 0;

        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;

        return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);

bool ath9k_hw_intrpend(struct ath_hw *ah)
{
        u32 host_isr;

        if (AR_SREV_9100(ah))
                return true;

        host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);

        if (((host_isr & AR_INTR_MAC_IRQ) ||
             (host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
            (host_isr != AR_INTR_SPURIOUS))
                return true;

        host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
        if ((host_isr & AR_INTR_SYNC_DEFAULT)
            && (host_isr != AR_INTR_SPURIOUS))
                return true;

        return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);

void ath9k_hw_kill_interrupts(struct ath_hw *ah)
{
        struct ath_common *common = ath9k_hw_common(ah);

        ath_dbg(common, INTERRUPT, "disable IER\n");
        REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
        (void) REG_READ(ah, AR_IER);
        if (!AR_SREV_9100(ah)) {
                REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
                (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

                REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
                (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
        }
}
EXPORT_SYMBOL(ath9k_hw_kill_interrupts);

void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
        if (!(ah->imask & ATH9K_INT_GLOBAL))
                atomic_set(&ah->intr_ref_cnt, -1);
        else
                atomic_dec(&ah->intr_ref_cnt);

        ath9k_hw_kill_interrupts(ah);
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);

static void __ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
        struct ath_common *common = ath9k_hw_common(ah);
        u32 sync_default = AR_INTR_SYNC_DEFAULT;
        u32 async_mask;

        if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
            AR_SREV_9561(ah))
                sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;

        async_mask = AR_INTR_MAC_IRQ;

        if (ah->imask & ATH9K_INT_MCI)
                async_mask |= AR_INTR_ASYNC_MASK_MCI;

        ath_dbg(common, INTERRUPT, "enable IER\n");
        REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
        if (!AR_SREV_9100(ah)) {
                REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
                REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);

                REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
                REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
        }
        ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
                REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));

        if (ah->msi_enabled) {
                u32 _msi_reg = 0;
                u32 i = 0;
                u32 msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;

                ath_dbg(ath9k_hw_common(ah), INTERRUPT,
                        "Enabling MSI, msi_mask=0x%X\n", ah->msi_mask);

                REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, ah->msi_mask);
                REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, ah->msi_mask);
                ath_dbg(ath9k_hw_common(ah), INTERRUPT,
                        "AR_INTR_PRIO_ASYNC_ENABLE=0x%X, AR_INTR_PRIO_ASYNC_MASK=0x%X\n",
                        REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE),
                        REG_READ(ah, AR_INTR_PRIO_ASYNC_MASK));

                if (ah->msi_reg == 0)
                        ah->msi_reg = REG_READ(ah, AR_PCIE_MSI);

                ath_dbg(ath9k_hw_common(ah), INTERRUPT,
                        "AR_PCIE_MSI=0x%X, ah->msi_reg = 0x%X\n",
                        AR_PCIE_MSI, ah->msi_reg);

                i = 0;
                do {
                        REG_WRITE(ah, AR_PCIE_MSI,
                                  (ah->msi_reg | AR_PCIE_MSI_ENABLE)
                                  & msi_pend_addr_mask);
                        _msi_reg = REG_READ(ah, AR_PCIE_MSI);
                        i++;
                } while ((_msi_reg & AR_PCIE_MSI_ENABLE) == 0 && i < 200);

                if (i >= 200)
                        ath_err(ath9k_hw_common(ah),
                                "%s: _msi_reg = 0x%X\n",
                                __func__, _msi_reg);
        }
}

void ath9k_hw_resume_interrupts(struct ath_hw *ah)
{
        struct ath_common *common = ath9k_hw_common(ah);

        if (!(ah->imask & ATH9K_INT_GLOBAL))
                return;

        if (atomic_read(&ah->intr_ref_cnt) != 0) {
                ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
                        atomic_read(&ah->intr_ref_cnt));
                return;
        }

        __ath9k_hw_enable_interrupts(ah);
}
EXPORT_SYMBOL(ath9k_hw_resume_interrupts);

void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
        struct ath_common *common = ath9k_hw_common(ah);

        if (!(ah->imask & ATH9K_INT_GLOBAL))
                return;

        if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
                ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
                        atomic_read(&ah->intr_ref_cnt));
                return;
        }

        __ath9k_hw_enable_interrupts(ah);
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);

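/*
 * Editor's illustrative sketch (not part of the driver): interrupt
 * disable/enable calls nest via ah->intr_ref_cnt, so they must stay
 * balanced; the hardware is only re-armed when the count returns to zero.
 * ath9k_hw_updatetxtriglevel() above uses exactly this pattern. The
 * function name is hypothetical.
 */
static void __maybe_unused ath9k_example_irq_critical(struct ath_hw *ah)
{
        ath9k_hw_disable_interrupts(ah);        /* refcount decremented */

        /* ... touch registers that must not race the ISR ... */

        ath9k_hw_enable_interrupts(ah);         /* re-armed only at refcount zero */
}
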
void ath9k_hw_set_interrupts(struct ath_hw *ah)
{
        enum ath9k_int ints = ah->imask;
        u32 mask, mask2;
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath_common *common = ath9k_hw_common(ah);

        if (!(ints & ATH9K_INT_GLOBAL))
                ath9k_hw_disable_interrupts(ah);

        if (ah->msi_enabled) {
                ath_dbg(common, INTERRUPT, "Clearing AR_INTR_PRIO_ASYNC_ENABLE\n");

                REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
                REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE);
        }

        ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints);

        mask = ints & ATH9K_INT_COMMON;
        mask2 = 0;

        ah->msi_mask = 0;
        if (ints & ATH9K_INT_TX) {
                ah->msi_mask |= AR_INTR_PRIO_TX;
                if (ah->config.tx_intr_mitigation)
                        mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
                else {
                        if (ah->txok_interrupt_mask)
                                mask |= AR_IMR_TXOK;
                        if (ah->txdesc_interrupt_mask)
                                mask |= AR_IMR_TXDESC;
                }
                if (ah->txerr_interrupt_mask)
                        mask |= AR_IMR_TXERR;
                if (ah->txeol_interrupt_mask)
                        mask |= AR_IMR_TXEOL;
        }
        if (ints & ATH9K_INT_RX) {
                ah->msi_mask |= AR_INTR_PRIO_RXLP | AR_INTR_PRIO_RXHP;
                if (AR_SREV_9300_20_OR_LATER(ah)) {
                        mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
                        if (ah->config.rx_intr_mitigation) {
                                mask &= ~AR_IMR_RXOK_LP;
                                mask |=  AR_IMR_RXMINTR | AR_IMR_RXINTM;
                        } else {
                                mask |= AR_IMR_RXOK_LP;
                        }
                } else {
                        if (ah->config.rx_intr_mitigation)
                                mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
                        else
                                mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
                }
                if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
                        mask |= AR_IMR_GENTMR;
        }

        if (ints & ATH9K_INT_GENTIMER)
                mask |= AR_IMR_GENTMR;

        if (ints & (ATH9K_INT_BMISC)) {
                mask |= AR_IMR_BCNMISC;
                if (ints & ATH9K_INT_TIM)
                        mask2 |= AR_IMR_S2_TIM;
                if (ints & ATH9K_INT_DTIM)
                        mask2 |= AR_IMR_S2_DTIM;
                if (ints & ATH9K_INT_DTIMSYNC)
                        mask2 |= AR_IMR_S2_DTIMSYNC;
                if (ints & ATH9K_INT_CABEND)
                        mask2 |= AR_IMR_S2_CABEND;
                if (ints & ATH9K_INT_TSFOOR)
                        mask2 |= AR_IMR_S2_TSFOOR;
        }

        if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
                mask |= AR_IMR_BCNMISC;
                if (ints & ATH9K_INT_GTT)
                        mask2 |= AR_IMR_S2_GTT;
                if (ints & ATH9K_INT_CST)
                        mask2 |= AR_IMR_S2_CST;
        }

        if (ah->config.hw_hang_checks & HW_BB_WATCHDOG) {
                if (ints & ATH9K_INT_BB_WATCHDOG) {
                        mask |= AR_IMR_BCNMISC;
                        mask2 |= AR_IMR_S2_BB_WATCHDOG;
                }
        }

        ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask);
        REG_WRITE(ah, AR_IMR, mask);
        ah->imrs2_reg &= ~(AR_IMR_S2_TIM |
                           AR_IMR_S2_DTIM |
                           AR_IMR_S2_DTIMSYNC |
                           AR_IMR_S2_CABEND |
                           AR_IMR_S2_CABTO |
                           AR_IMR_S2_TSFOOR |
                           AR_IMR_S2_GTT |
                           AR_IMR_S2_CST);

        if (ah->config.hw_hang_checks & HW_BB_WATCHDOG) {
                if (ints & ATH9K_INT_BB_WATCHDOG)
                        ah->imrs2_reg &= ~AR_IMR_S2_BB_WATCHDOG;
        }

        ah->imrs2_reg |= mask2;
        REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

        if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
                if (ints & ATH9K_INT_TIM_TIMER)
                        REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
                else
                        REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
        }
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);

#define ATH9K_HW_MAX_DCU       10
#define ATH9K_HW_SLICE_PER_DCU 16
#define ATH9K_HW_BIT_IN_SLICE  16
void ath9k_hw_set_tx_filter(struct ath_hw *ah, u8 destidx, bool set)
{
        int dcu_idx;
        u32 filter;

        for (dcu_idx = 0; dcu_idx < ATH9K_HW_MAX_DCU; dcu_idx++) {
                filter = SM(set, AR_D_TXBLK_WRITE_COMMAND);
                filter |= SM(dcu_idx, AR_D_TXBLK_WRITE_DCU);
                filter |= SM((destidx / ATH9K_HW_SLICE_PER_DCU),
                             AR_D_TXBLK_WRITE_SLICE);
                filter |= BIT(destidx % ATH9K_HW_BIT_IN_SLICE);
                ath_dbg(ath9k_hw_common(ah), PS,
                        "DCU%d staid %d set %d txfilter %08x\n",
                        dcu_idx, destidx, set, filter);
                REG_WRITE(ah, AR_D_TXBLK_BASE, filter);
        }
}
EXPORT_SYMBOL(ath9k_hw_set_tx_filter);
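
/*
 * Editor's illustrative sketch (not part of the driver): the slice/bit
 * decomposition used above, as a standalone helper with a hypothetical
 * name. With 16 entries per slice, destidx 21 lands in slice 21 / 16 = 1
 * at bit 21 % 16 = 5.
 */
static inline u32 __maybe_unused ath9k_example_txblk_bits(u8 destidx)
{
        u32 slice = destidx / ATH9K_HW_SLICE_PER_DCU;   /* which 16-entry slice */
        u32 bit = destidx % ATH9K_HW_BIT_IN_SLICE;      /* bit within the slice */

        return SM(slice, AR_D_TXBLK_WRITE_SLICE) | BIT(bit);
}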
