/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM and consists of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers
 * for the device to read from or fill. The driver and the device exchange
 * the status of each queue via "read" and "write" pointers. The driver keeps
 * a minimum of 2 empty entries in each circular buffer, to protect against
 * confusing the empty and full queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low-mark and high-mark limits. If, after queuing
 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if the free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/
static int iwl_queue_space(const struct iwl_queue *q)
{
        unsigned int max;
        unsigned int used;

        /*
         * To avoid ambiguity between empty and completely full queues, there
         * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
         * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
         * to reserve any queue entries for this purpose.
         */
        if (q->n_window < TFD_QUEUE_SIZE_MAX)
                max = q->n_window;
        else
                max = TFD_QUEUE_SIZE_MAX - 1;

        /*
         * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
         * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
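         *
         * Illustrative example (assuming TFD_QUEUE_SIZE_MAX == 256): with
         * write_ptr == 3 and read_ptr == 250, (3 - 250) & 255 == 9, i.e. the
         * nine in-flight entries 250..255 and 0..2.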
87 */ 88 used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1); 89 90 if (WARN_ON(used > max)) 91 return 0; 92 93 return max - used; 94} 95 96/* 97 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes 98 */ 99static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id) 100{ 101 q->n_window = slots_num; 102 q->id = id; 103 104 /* slots_num must be power-of-two size, otherwise 105 * get_cmd_index is broken. */ 106 if (WARN_ON(!is_power_of_2(slots_num))) 107 return -EINVAL; 108 109 q->low_mark = q->n_window / 4; 110 if (q->low_mark < 4) 111 q->low_mark = 4; 112 113 q->high_mark = q->n_window / 8; 114 if (q->high_mark < 2) 115 q->high_mark = 2; 116 117 q->write_ptr = 0; 118 q->read_ptr = 0; 119 120 return 0; 121} 122 123static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, 124 struct iwl_dma_ptr *ptr, size_t size) 125{ 126 if (WARN_ON(ptr->addr)) 127 return -EINVAL; 128 129 ptr->addr = dma_alloc_coherent(trans->dev, size, 130 &ptr->dma, GFP_KERNEL); 131 if (!ptr->addr) 132 return -ENOMEM; 133 ptr->size = size; 134 return 0; 135} 136 137static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, 138 struct iwl_dma_ptr *ptr) 139{ 140 if (unlikely(!ptr->addr)) 141 return; 142 143 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); 144 memset(ptr, 0, sizeof(*ptr)); 145} 146 147static void iwl_pcie_txq_stuck_timer(unsigned long data) 148{ 149 struct iwl_txq *txq = (void *)data; 150 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; 151 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); 152 u32 scd_sram_addr = trans_pcie->scd_base_addr + 153 SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); 154 u8 buf[16]; 155 int i; 156 157 spin_lock(&txq->lock); 158 /* check if triggered erroneously */ 159 if (txq->q.read_ptr == txq->q.write_ptr) { 160 spin_unlock(&txq->lock); 161 return; 162 } 163 spin_unlock(&txq->lock); 164 165 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, 166 jiffies_to_msecs(txq->wd_timeout)); 167 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", 168 txq->q.read_ptr, txq->q.write_ptr); 169 170 iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); 171 172 iwl_print_hex_error(trans, buf, sizeof(buf)); 173 174 for (i = 0; i < FH_TCSR_CHNL_NUM; i++) 175 IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i, 176 iwl_read_direct32(trans, FH_TX_TRB_REG(i))); 177 178 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { 179 u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i)); 180 u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; 181 bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); 182 u32 tbl_dw = 183 iwl_trans_read_mem32(trans, 184 trans_pcie->scd_base_addr + 185 SCD_TRANS_TBL_OFFSET_QUEUE(i)); 186 187 if (i & 0x1) 188 tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; 189 else 190 tbl_dw = tbl_dw & 0x0000FFFF; 191 192 IWL_ERR(trans, 193 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", 194 i, active ? 
"" : "in", fifo, tbl_dw, 195 iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) & 196 (TFD_QUEUE_SIZE_MAX - 1), 197 iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); 198 } 199 200 iwl_force_nmi(trans); 201} 202 203/* 204 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 205 */ 206static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, 207 struct iwl_txq *txq, u16 byte_cnt) 208{ 209 struct iwlagn_scd_bc_tbl *scd_bc_tbl; 210 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 211 int write_ptr = txq->q.write_ptr; 212 int txq_id = txq->q.id; 213 u8 sec_ctl = 0; 214 u8 sta_id = 0; 215 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 216 __le16 bc_ent; 217 struct iwl_tx_cmd *tx_cmd = 218 (void *) txq->entries[txq->q.write_ptr].cmd->payload; 219 220 scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; 221 222 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); 223 224 sta_id = tx_cmd->sta_id; 225 sec_ctl = tx_cmd->sec_ctl; 226 227 switch (sec_ctl & TX_CMD_SEC_MSK) { 228 case TX_CMD_SEC_CCM: 229 len += IEEE80211_CCMP_MIC_LEN; 230 break; 231 case TX_CMD_SEC_TKIP: 232 len += IEEE80211_TKIP_ICV_LEN; 233 break; 234 case TX_CMD_SEC_WEP: 235 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; 236 break; 237 } 238 239 if (trans_pcie->bc_table_dword) 240 len = DIV_ROUND_UP(len, 4); 241 242 bc_ent = cpu_to_le16(len | (sta_id << 12)); 243 244 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; 245 246 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) 247 scd_bc_tbl[txq_id]. 248 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; 249} 250 251static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, 252 struct iwl_txq *txq) 253{ 254 struct iwl_trans_pcie *trans_pcie = 255 IWL_TRANS_GET_PCIE_TRANS(trans); 256 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; 257 int txq_id = txq->q.id; 258 int read_ptr = txq->q.read_ptr; 259 u8 sta_id = 0; 260 __le16 bc_ent; 261 struct iwl_tx_cmd *tx_cmd = 262 (void *)txq->entries[txq->q.read_ptr].cmd->payload; 263 264 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 265 266 if (txq_id != trans_pcie->cmd_queue) 267 sta_id = tx_cmd->sta_id; 268 269 bc_ent = cpu_to_le16(1 | (sta_id << 12)); 270 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; 271 272 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) 273 scd_bc_tbl[txq_id]. 274 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; 275} 276 277/* 278 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware 279 */ 280static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, 281 struct iwl_txq *txq) 282{ 283 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 284 u32 reg = 0; 285 int txq_id = txq->q.id; 286 287 lockdep_assert_held(&txq->lock); 288 289 /* 290 * explicitly wake up the NIC if: 291 * 1. shadow registers aren't enabled 292 * 2. NIC is woken up for CMD regardless of shadow outside this function 293 * 3. there is a chance that the NIC is asleep 294 */ 295 if (!trans->cfg->base_params->shadow_reg_enable && 296 txq_id != trans_pcie->cmd_queue && 297 test_bit(STATUS_TPOWER_PMI, &trans->status)) { 298 /* 299 * wake up nic if it's powered down ... 300 * uCode will wake up, and interrupt us again, so next 301 * time we'll skip this part. 
302 */ 303 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); 304 305 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 306 IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", 307 txq_id, reg); 308 iwl_set_bit(trans, CSR_GP_CNTRL, 309 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 310 txq->need_update = true; 311 return; 312 } 313 } 314 315 /* 316 * if not in power-save mode, uCode will never sleep when we're 317 * trying to tx (during RFKILL, we're not trying to tx). 318 */ 319 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr); 320 iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); 321} 322 323void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) 324{ 325 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 326 int i; 327 328 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { 329 struct iwl_txq *txq = &trans_pcie->txq[i]; 330 331 spin_lock_bh(&txq->lock); 332 if (trans_pcie->txq[i].need_update) { 333 iwl_pcie_txq_inc_wr_ptr(trans, txq); 334 trans_pcie->txq[i].need_update = false; 335 } 336 spin_unlock_bh(&txq->lock); 337 } 338} 339 340static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) 341{ 342 struct iwl_tfd_tb *tb = &tfd->tbs[idx]; 343 344 dma_addr_t addr = get_unaligned_le32(&tb->lo); 345 if (sizeof(dma_addr_t) > sizeof(u32)) 346 addr |= 347 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16; 348 349 return addr; 350} 351 352static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, 353 dma_addr_t addr, u16 len) 354{ 355 struct iwl_tfd_tb *tb = &tfd->tbs[idx]; 356 u16 hi_n_len = len << 4; 357 358 put_unaligned_le32(addr, &tb->lo); 359 if (sizeof(dma_addr_t) > sizeof(u32)) 360 hi_n_len |= ((addr >> 16) >> 16) & 0xF; 361 362 tb->hi_n_len = cpu_to_le16(hi_n_len); 363 364 tfd->num_tbs = idx + 1; 365} 366 367static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd) 368{ 369 return tfd->num_tbs & 0x1f; 370} 371 372static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, 373 struct iwl_cmd_meta *meta, 374 struct iwl_tfd *tfd) 375{ 376 int i; 377 int num_tbs; 378 379 /* Sanity check on number of chunks */ 380 num_tbs = iwl_pcie_tfd_get_num_tbs(tfd); 381 382 if (num_tbs >= IWL_NUM_OF_TBS) { 383 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 384 /* @todo issue fatal error, it is quite serious situation */ 385 return; 386 } 387 388 /* first TB is never freed - it's the scratchbuf data */ 389 390 for (i = 1; i < num_tbs; i++) 391 dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i), 392 iwl_pcie_tfd_tb_get_len(tfd, i), 393 DMA_TO_DEVICE); 394 395 tfd->num_tbs = 0; 396} 397 398/* 399 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 400 * @trans - transport private data 401 * @txq - tx queue 402 * @dma_dir - the direction of the DMA mapping 403 * 404 * Does NOT advance any TFD circular buffer read/write indexes 405 * Does NOT free the TFD itself (which is within circular buffer) 406 */ 407static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 408{ 409 struct iwl_tfd *tfd_tmp = txq->tfds; 410 411 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 412 * idx is bounded by n_window 413 */ 414 int rd_ptr = txq->q.read_ptr; 415 int idx = get_cmd_index(&txq->q, rd_ptr); 416 417 lockdep_assert_held(&txq->lock); 418 419 /* We have only q->n_window txq->entries, but we use 420 * TFD_QUEUE_SIZE_MAX tfds 421 */ 422 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]); 423 424 /* free SKB */ 425 if (txq->entries) { 426 struct 
sk_buff *skb; 427 428 skb = txq->entries[idx].skb; 429 430 /* Can be called from irqs-disabled context 431 * If skb is not NULL, it means that the whole queue is being 432 * freed and that the queue is not empty - free the skb 433 */ 434 if (skb) { 435 iwl_op_mode_free_skb(trans->op_mode, skb); 436 txq->entries[idx].skb = NULL; 437 } 438 } 439} 440 441static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, 442 dma_addr_t addr, u16 len, bool reset) 443{ 444 struct iwl_queue *q; 445 struct iwl_tfd *tfd, *tfd_tmp; 446 u32 num_tbs; 447 448 q = &txq->q; 449 tfd_tmp = txq->tfds; 450 tfd = &tfd_tmp[q->write_ptr]; 451 452 if (reset) 453 memset(tfd, 0, sizeof(*tfd)); 454 455 num_tbs = iwl_pcie_tfd_get_num_tbs(tfd); 456 457 /* Each TFD can point to a maximum 20 Tx buffers */ 458 if (num_tbs >= IWL_NUM_OF_TBS) { 459 IWL_ERR(trans, "Error can not send more than %d chunks\n", 460 IWL_NUM_OF_TBS); 461 return -EINVAL; 462 } 463 464 if (WARN(addr & ~IWL_TX_DMA_MASK, 465 "Unaligned address = %llx\n", (unsigned long long)addr)) 466 return -EINVAL; 467 468 iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len); 469 470 return 0; 471} 472 473static int iwl_pcie_txq_alloc(struct iwl_trans *trans, 474 struct iwl_txq *txq, int slots_num, 475 u32 txq_id) 476{ 477 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 478 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; 479 size_t scratchbuf_sz; 480 int i; 481 482 if (WARN_ON(txq->entries || txq->tfds)) 483 return -EINVAL; 484 485 setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 486 (unsigned long)txq); 487 txq->trans_pcie = trans_pcie; 488 489 txq->q.n_window = slots_num; 490 491 txq->entries = kcalloc(slots_num, 492 sizeof(struct iwl_pcie_txq_entry), 493 GFP_KERNEL); 494 495 if (!txq->entries) 496 goto error; 497 498 if (txq_id == trans_pcie->cmd_queue) 499 for (i = 0; i < slots_num; i++) { 500 txq->entries[i].cmd = 501 kmalloc(sizeof(struct iwl_device_cmd), 502 GFP_KERNEL); 503 if (!txq->entries[i].cmd) 504 goto error; 505 } 506 507 /* Circular buffer of transmit frame descriptors (TFDs), 508 * shared with device */ 509 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, 510 &txq->q.dma_addr, GFP_KERNEL); 511 if (!txq->tfds) 512 goto error; 513 514 BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs)); 515 BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) != 516 sizeof(struct iwl_cmd_header) + 517 offsetof(struct iwl_tx_cmd, scratch)); 518 519 scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num; 520 521 txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz, 522 &txq->scratchbufs_dma, 523 GFP_KERNEL); 524 if (!txq->scratchbufs) 525 goto err_free_tfds; 526 527 txq->q.id = txq_id; 528 529 return 0; 530err_free_tfds: 531 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr); 532error: 533 if (txq->entries && txq_id == trans_pcie->cmd_queue) 534 for (i = 0; i < slots_num; i++) 535 kfree(txq->entries[i].cmd); 536 kfree(txq->entries); 537 txq->entries = NULL; 538 539 return -ENOMEM; 540 541} 542 543static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, 544 int slots_num, u32 txq_id) 545{ 546 int ret; 547 548 txq->need_update = false; 549 550 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 551 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. 
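         * (Both helpers wrap an index by masking with TFD_QUEUE_SIZE_MAX - 1;
         * that is only equivalent to a true modulo for power-of-two sizes,
         * which the BUILD_BUG_ON below enforces at compile time.)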
*/ 552 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 553 554 /* Initialize queue's high/low-water marks, and head/tail indexes */ 555 ret = iwl_queue_init(&txq->q, slots_num, txq_id); 556 if (ret) 557 return ret; 558 559 spin_lock_init(&txq->lock); 560 561 /* 562 * Tell nic where to find circular buffer of Tx Frame Descriptors for 563 * given Tx queue, and enable the DMA channel used for that queue. 564 * Circular buffer (TFD queue in DRAM) physical base address */ 565 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), 566 txq->q.dma_addr >> 8); 567 568 return 0; 569} 570 571/* 572 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's 573 */ 574static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) 575{ 576 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 577 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 578 struct iwl_queue *q = &txq->q; 579 580 spin_lock_bh(&txq->lock); 581 while (q->write_ptr != q->read_ptr) { 582 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", 583 txq_id, q->read_ptr); 584 iwl_pcie_txq_free_tfd(trans, txq); 585 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr); 586 } 587 txq->active = false; 588 spin_unlock_bh(&txq->lock); 589 590 /* just in case - this queue may have been stopped */ 591 iwl_wake_queue(trans, txq); 592} 593 594/* 595 * iwl_pcie_txq_free - Deallocate DMA queue. 596 * @txq: Transmit queue to deallocate. 597 * 598 * Empty queue by removing and destroying all BD's. 599 * Free all buffers. 600 * 0-fill, but do not free "txq" descriptor structure. 601 */ 602static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) 603{ 604 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 605 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 606 struct device *dev = trans->dev; 607 int i; 608 609 if (WARN_ON(!txq)) 610 return; 611 612 iwl_pcie_txq_unmap(trans, txq_id); 613 614 /* De-alloc array of command/tx buffers */ 615 if (txq_id == trans_pcie->cmd_queue) 616 for (i = 0; i < txq->q.n_window; i++) { 617 kzfree(txq->entries[i].cmd); 618 kzfree(txq->entries[i].free_buf); 619 } 620 621 /* De-alloc circular buffer of TFDs */ 622 if (txq->tfds) { 623 dma_free_coherent(dev, 624 sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX, 625 txq->tfds, txq->q.dma_addr); 626 txq->q.dma_addr = 0; 627 txq->tfds = NULL; 628 629 dma_free_coherent(dev, 630 sizeof(*txq->scratchbufs) * txq->q.n_window, 631 txq->scratchbufs, txq->scratchbufs_dma); 632 } 633 634 kfree(txq->entries); 635 txq->entries = NULL; 636 637 del_timer_sync(&txq->stuck_timer); 638 639 /* 0-fill queue descriptor structure */ 640 memset(txq, 0, sizeof(*txq)); 641} 642 643void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) 644{ 645 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 646 int nq = trans->cfg->base_params->num_of_queues; 647 int chan; 648 u32 reg_val; 649 int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - 650 SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); 651 652 /* make sure all queue are not stopped/used */ 653 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); 654 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); 655 656 trans_pcie->scd_base_addr = 657 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); 658 659 WARN_ON(scd_base_addr != 0 && 660 scd_base_addr != trans_pcie->scd_base_addr); 661 662 /* reset context data, TX status and translation data */ 663 iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + 664 SCD_CONTEXT_MEM_LOWER_BOUND, 665 NULL, clear_dwords); 666 
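        /* Tell the scheduler where the byte-count tables sit in DRAM; the
         * register is programmed with the DMA address shifted right by 10,
         * i.e. in 1 KB units. */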
667 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, 668 trans_pcie->scd_bc_tbls.dma >> 10); 669 670 /* The chain extension of the SCD doesn't work well. This feature is 671 * enabled by default by the HW, so we need to disable it manually. 672 */ 673 if (trans->cfg->base_params->scd_chain_ext_wa) 674 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); 675 676 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, 677 trans_pcie->cmd_fifo, 678 trans_pcie->cmd_q_wdg_timeout); 679 680 /* Activate all Tx DMA/FIFO channels */ 681 iwl_scd_activate_fifos(trans); 682 683 /* Enable DMA channel */ 684 for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) 685 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), 686 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 687 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); 688 689 /* Update FH chicken bits */ 690 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); 691 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, 692 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); 693 694 /* Enable L1-Active */ 695 if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) 696 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, 697 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 698} 699 700void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) 701{ 702 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 703 int txq_id; 704 705 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 706 txq_id++) { 707 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 708 709 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), 710 txq->q.dma_addr >> 8); 711 iwl_pcie_txq_unmap(trans, txq_id); 712 txq->q.read_ptr = 0; 713 txq->q.write_ptr = 0; 714 } 715 716 /* Tell NIC where to find the "keep warm" buffer */ 717 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 718 trans_pcie->kw.dma >> 4); 719 720 /* 721 * Send 0 as the scd_base_addr since the device may have be reset 722 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will 723 * contain garbage. 724 */ 725 iwl_pcie_tx_start(trans, 0); 726} 727 728static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) 729{ 730 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 731 unsigned long flags; 732 int ch, ret; 733 u32 mask = 0; 734 735 spin_lock(&trans_pcie->irq_lock); 736 737 if (!iwl_trans_grab_nic_access(trans, false, &flags)) 738 goto out; 739 740 /* Stop each Tx DMA channel */ 741 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { 742 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); 743 mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch); 744 } 745 746 /* Wait for DMA channels to be idle */ 747 ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); 748 if (ret < 0) 749 IWL_ERR(trans, 750 "Failing on timeout while stopping DMA channel %d [0x%08x]\n", 751 ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); 752 753 iwl_trans_release_nic_access(trans, &flags); 754 755out: 756 spin_unlock(&trans_pcie->irq_lock); 757} 758 759/* 760 * iwl_pcie_tx_stop - Stop all Tx DMA channels 761 */ 762int iwl_pcie_tx_stop(struct iwl_trans *trans) 763{ 764 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 765 int txq_id; 766 767 /* Turn off all Tx DMA fifos */ 768 iwl_scd_deactivate_fifos(trans); 769 770 /* Turn off all Tx DMA channels */ 771 iwl_pcie_tx_stop_fh(trans); 772 773 /* 774 * This function can be called before the op_mode disabled the 775 * queues. This happens when we have an rfkill interrupt. 776 * Since we stop Tx altogether - mark the queues as stopped. 
777 */ 778 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); 779 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); 780 781 /* This can happen: start_hw, stop_device */ 782 if (!trans_pcie->txq) 783 return 0; 784 785 /* Unmap DMA from host system and free skb's */ 786 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 787 txq_id++) 788 iwl_pcie_txq_unmap(trans, txq_id); 789 790 return 0; 791} 792 793/* 794 * iwl_trans_tx_free - Free TXQ Context 795 * 796 * Destroy all TX DMA queues and structures 797 */ 798void iwl_pcie_tx_free(struct iwl_trans *trans) 799{ 800 int txq_id; 801 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 802 803 /* Tx queues */ 804 if (trans_pcie->txq) { 805 for (txq_id = 0; 806 txq_id < trans->cfg->base_params->num_of_queues; txq_id++) 807 iwl_pcie_txq_free(trans, txq_id); 808 } 809 810 kfree(trans_pcie->txq); 811 trans_pcie->txq = NULL; 812 813 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); 814 815 iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); 816} 817 818/* 819 * iwl_pcie_tx_alloc - allocate TX context 820 * Allocate all Tx DMA structures and initialize them 821 */ 822static int iwl_pcie_tx_alloc(struct iwl_trans *trans) 823{ 824 int ret; 825 int txq_id, slots_num; 826 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 827 828 u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues * 829 sizeof(struct iwlagn_scd_bc_tbl); 830 831 /*It is not allowed to alloc twice, so warn when this happens. 832 * We cannot rely on the previous allocation, so free and fail */ 833 if (WARN_ON(trans_pcie->txq)) { 834 ret = -EINVAL; 835 goto error; 836 } 837 838 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, 839 scd_bc_tbls_size); 840 if (ret) { 841 IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); 842 goto error; 843 } 844 845 /* Alloc keep-warm buffer */ 846 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); 847 if (ret) { 848 IWL_ERR(trans, "Keep Warm allocation failed\n"); 849 goto error; 850 } 851 852 trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues, 853 sizeof(struct iwl_txq), GFP_KERNEL); 854 if (!trans_pcie->txq) { 855 IWL_ERR(trans, "Not enough memory for txq\n"); 856 ret = -ENOMEM; 857 goto error; 858 } 859 860 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 861 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 862 txq_id++) { 863 slots_num = (txq_id == trans_pcie->cmd_queue) ? 
864 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 865 ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id], 866 slots_num, txq_id); 867 if (ret) { 868 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); 869 goto error; 870 } 871 } 872 873 return 0; 874 875error: 876 iwl_pcie_tx_free(trans); 877 878 return ret; 879} 880int iwl_pcie_tx_init(struct iwl_trans *trans) 881{ 882 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 883 int ret; 884 int txq_id, slots_num; 885 bool alloc = false; 886 887 if (!trans_pcie->txq) { 888 ret = iwl_pcie_tx_alloc(trans); 889 if (ret) 890 goto error; 891 alloc = true; 892 } 893 894 spin_lock(&trans_pcie->irq_lock); 895 896 /* Turn off all Tx DMA fifos */ 897 iwl_scd_deactivate_fifos(trans); 898 899 /* Tell NIC where to find the "keep warm" buffer */ 900 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 901 trans_pcie->kw.dma >> 4); 902 903 spin_unlock(&trans_pcie->irq_lock); 904 905 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 906 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 907 txq_id++) { 908 slots_num = (txq_id == trans_pcie->cmd_queue) ? 909 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 910 ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id], 911 slots_num, txq_id); 912 if (ret) { 913 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); 914 goto error; 915 } 916 } 917 918 if (trans->cfg->base_params->num_of_queues > 20) 919 iwl_set_bits_prph(trans, SCD_GP_CTRL, 920 SCD_GP_CTRL_ENABLE_31_QUEUES); 921 922 return 0; 923error: 924 /*Upon error, free only if we allocated something */ 925 if (alloc) 926 iwl_pcie_tx_free(trans); 927 return ret; 928} 929 930static inline void iwl_pcie_txq_progress(struct iwl_txq *txq) 931{ 932 lockdep_assert_held(&txq->lock); 933 934 if (!txq->wd_timeout) 935 return; 936 937 /* 938 * station is asleep and we send data - that must 939 * be uAPSD or PS-Poll. Don't rearm the timer. 940 */ 941 if (txq->frozen) 942 return; 943 944 /* 945 * if empty delete timer, otherwise move timer forward 946 * since we're making progress on this queue 947 */ 948 if (txq->q.read_ptr == txq->q.write_ptr) 949 del_timer(&txq->stuck_timer); 950 else 951 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 952} 953 954/* Frees buffers until index _not_ inclusive */ 955void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, 956 struct sk_buff_head *skbs) 957{ 958 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 959 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 960 int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1); 961 struct iwl_queue *q = &txq->q; 962 int last_to_free; 963 964 /* This function is not meant to release cmd queue*/ 965 if (WARN_ON(txq_id == trans_pcie->cmd_queue)) 966 return; 967 968 spin_lock_bh(&txq->lock); 969 970 if (!txq->active) { 971 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", 972 txq_id, ssn); 973 goto out; 974 } 975 976 if (txq->q.read_ptr == tfd_num) 977 goto out; 978 979 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", 980 txq_id, txq->q.read_ptr, tfd_num, ssn); 981 982 /*Since we free until index _not_ inclusive, the one before index is 983 * the last we will free. 
This one must be used */ 984 last_to_free = iwl_queue_dec_wrap(tfd_num); 985 986 if (!iwl_queue_used(q, last_to_free)) { 987 IWL_ERR(trans, 988 "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", 989 __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX, 990 q->write_ptr, q->read_ptr); 991 goto out; 992 } 993 994 if (WARN_ON(!skb_queue_empty(skbs))) 995 goto out; 996 997 for (; 998 q->read_ptr != tfd_num; 999 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) { 1000 1001 if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) 1002 continue; 1003 1004 __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb); 1005 1006 txq->entries[txq->q.read_ptr].skb = NULL; 1007 1008 iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); 1009 1010 iwl_pcie_txq_free_tfd(trans, txq); 1011 } 1012 1013 iwl_pcie_txq_progress(txq); 1014 1015 if (iwl_queue_space(&txq->q) > txq->q.low_mark) 1016 iwl_wake_queue(trans, txq); 1017 1018 if (q->read_ptr == q->write_ptr) { 1019 IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id); 1020 iwl_trans_pcie_unref(trans); 1021 } 1022 1023out: 1024 spin_unlock_bh(&txq->lock); 1025} 1026 1027static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, 1028 const struct iwl_host_cmd *cmd) 1029{ 1030 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1031 int ret; 1032 1033 lockdep_assert_held(&trans_pcie->reg_lock); 1034 1035 if (!(cmd->flags & CMD_SEND_IN_IDLE) && 1036 !trans_pcie->ref_cmd_in_flight) { 1037 trans_pcie->ref_cmd_in_flight = true; 1038 IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n"); 1039 iwl_trans_pcie_ref(trans); 1040 } 1041 1042 /* 1043 * wake up the NIC to make sure that the firmware will see the host 1044 * command - we will let the NIC sleep once all the host commands 1045 * returned. This needs to be done only on NICs that have 1046 * apmg_wake_up_wa set. 1047 */ 1048 if (trans->cfg->base_params->apmg_wake_up_wa && 1049 !trans_pcie->cmd_hold_nic_awake) { 1050 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 1051 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1052 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) 1053 udelay(2); 1054 1055 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 1056 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 1057 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 1058 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 1059 15000); 1060 if (ret < 0) { 1061 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1062 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1063 IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); 1064 return -EIO; 1065 } 1066 trans_pcie->cmd_hold_nic_awake = true; 1067 } 1068 1069 return 0; 1070} 1071 1072static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) 1073{ 1074 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1075 1076 lockdep_assert_held(&trans_pcie->reg_lock); 1077 1078 if (trans_pcie->ref_cmd_in_flight) { 1079 trans_pcie->ref_cmd_in_flight = false; 1080 IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n"); 1081 iwl_trans_pcie_unref(trans); 1082 } 1083 1084 if (trans->cfg->base_params->apmg_wake_up_wa) { 1085 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) 1086 return 0; 1087 1088 trans_pcie->cmd_hold_nic_awake = false; 1089 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1090 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1091 } 1092 return 0; 1093} 1094 1095/* 1096 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd 1097 * 1098 * When FW advances 'R' index, all entries between old and new 'R' index 1099 * need to be reclaimed. 
As result, some free space forms. If there is 1100 * enough free space (> low mark), wake the stack that feeds us. 1101 */ 1102static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) 1103{ 1104 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1105 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 1106 struct iwl_queue *q = &txq->q; 1107 unsigned long flags; 1108 int nfreed = 0; 1109 1110 lockdep_assert_held(&txq->lock); 1111 1112 if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) { 1113 IWL_ERR(trans, 1114 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", 1115 __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX, 1116 q->write_ptr, q->read_ptr); 1117 return; 1118 } 1119 1120 for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx; 1121 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) { 1122 1123 if (nfreed++ > 0) { 1124 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", 1125 idx, q->write_ptr, q->read_ptr); 1126 iwl_force_nmi(trans); 1127 } 1128 } 1129 1130 if (q->read_ptr == q->write_ptr) { 1131 spin_lock_irqsave(&trans_pcie->reg_lock, flags); 1132 iwl_pcie_clear_cmd_in_flight(trans); 1133 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1134 } 1135 1136 iwl_pcie_txq_progress(txq); 1137} 1138 1139static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, 1140 u16 txq_id) 1141{ 1142 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1143 u32 tbl_dw_addr; 1144 u32 tbl_dw; 1145 u16 scd_q2ratid; 1146 1147 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; 1148 1149 tbl_dw_addr = trans_pcie->scd_base_addr + 1150 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); 1151 1152 tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr); 1153 1154 if (txq_id & 0x1) 1155 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); 1156 else 1157 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); 1158 1159 iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw); 1160 1161 return 0; 1162} 1163 1164/* Receiver address (actually, Rx station's index into station table), 1165 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ 1166#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) 1167 1168void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, 1169 const struct iwl_trans_txq_scd_cfg *cfg, 1170 unsigned int wdg_timeout) 1171{ 1172 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1173 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 1174 int fifo = -1; 1175 1176 if (test_and_set_bit(txq_id, trans_pcie->queue_used)) 1177 WARN_ONCE(1, "queue %d already used - expect issues", txq_id); 1178 1179 txq->wd_timeout = msecs_to_jiffies(wdg_timeout); 1180 1181 if (cfg) { 1182 fifo = cfg->fifo; 1183 1184 /* Disable the scheduler prior configuring the cmd queue */ 1185 if (txq_id == trans_pcie->cmd_queue && 1186 trans_pcie->scd_set_active) 1187 iwl_scd_enable_set_active(trans, 0); 1188 1189 /* Stop this Tx queue before configuring it */ 1190 iwl_scd_txq_set_inactive(trans, txq_id); 1191 1192 /* Set this queue as a chain-building queue unless it is CMD */ 1193 if (txq_id != trans_pcie->cmd_queue) 1194 iwl_scd_txq_set_chain(trans, txq_id); 1195 1196 if (cfg->aggregate) { 1197 u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid); 1198 1199 /* Map receiver-address / traffic-ID to this queue */ 1200 iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); 1201 1202 /* enable aggregations for the queue */ 1203 iwl_scd_txq_enable_agg(trans, txq_id); 1204 txq->ampdu = true; 1205 } else { 
1206 /* 1207 * disable aggregations for the queue, this will also 1208 * make the ra_tid mapping configuration irrelevant 1209 * since it is now a non-AGG queue. 1210 */ 1211 iwl_scd_txq_disable_agg(trans, txq_id); 1212 1213 ssn = txq->q.read_ptr; 1214 } 1215 } 1216 1217 /* Place first TFD at index corresponding to start sequence number. 1218 * Assumes that ssn_idx is valid (!= 0xFFF) */ 1219 txq->q.read_ptr = (ssn & 0xff); 1220 txq->q.write_ptr = (ssn & 0xff); 1221 iwl_write_direct32(trans, HBUS_TARG_WRPTR, 1222 (ssn & 0xff) | (txq_id << 8)); 1223 1224 if (cfg) { 1225 u8 frame_limit = cfg->frame_limit; 1226 1227 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); 1228 1229 /* Set up Tx window size and frame limit for this queue */ 1230 iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + 1231 SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); 1232 iwl_trans_write_mem32(trans, 1233 trans_pcie->scd_base_addr + 1234 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), 1235 ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & 1236 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | 1237 ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 1238 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); 1239 1240 /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ 1241 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), 1242 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 1243 (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | 1244 (1 << SCD_QUEUE_STTS_REG_POS_WSL) | 1245 SCD_QUEUE_STTS_REG_MSK); 1246 1247 /* enable the scheduler for this queue (only) */ 1248 if (txq_id == trans_pcie->cmd_queue && 1249 trans_pcie->scd_set_active) 1250 iwl_scd_enable_set_active(trans, BIT(txq_id)); 1251 1252 IWL_DEBUG_TX_QUEUES(trans, 1253 "Activate queue %d on FIFO %d WrPtr: %d\n", 1254 txq_id, fifo, ssn & 0xff); 1255 } else { 1256 IWL_DEBUG_TX_QUEUES(trans, 1257 "Activate queue %d WrPtr: %d\n", 1258 txq_id, ssn & 0xff); 1259 } 1260 1261 txq->active = true; 1262} 1263 1264void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, 1265 bool configure_scd) 1266{ 1267 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1268 u32 stts_addr = trans_pcie->scd_base_addr + 1269 SCD_TX_STTS_QUEUE_OFFSET(txq_id); 1270 static const u32 zero_val[4] = {}; 1271 1272 trans_pcie->txq[txq_id].frozen_expiry_remainder = 0; 1273 trans_pcie->txq[txq_id].frozen = false; 1274 1275 /* 1276 * Upon HW Rfkill - we stop the device, and then stop the queues 1277 * in the op_mode. Just for the sake of the simplicity of the op_mode, 1278 * allow the op_mode to call txq_disable after it already called 1279 * stop_device. 1280 */ 1281 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { 1282 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), 1283 "queue %d not used", txq_id); 1284 return; 1285 } 1286 1287 if (configure_scd) { 1288 iwl_scd_txq_set_inactive(trans, txq_id); 1289 1290 iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, 1291 ARRAY_SIZE(zero_val)); 1292 } 1293 1294 iwl_pcie_txq_unmap(trans, txq_id); 1295 trans_pcie->txq[txq_id].ampdu = false; 1296 1297 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); 1298} 1299 1300/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 1301 1302/* 1303 * iwl_pcie_enqueue_hcmd - enqueue a uCode command 1304 * @priv: device private data point 1305 * @cmd: a pointer to the ucode command structure 1306 * 1307 * The function returns < 0 values to indicate the operation 1308 * failed. On success, it returns the index (>= 0) of command in the 1309 * command queue. 
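 *
 * (Roughly: the first IWL_HCMD_SCRATCHBUF_SIZE bytes, including the command
 * header, are copied into the per-queue scratch buffer and become TB0; any
 * further copied data lives in the command entry itself and is mapped as TB1;
 * NOCOPY/DUP chunks are DMA-mapped individually as additional TBs.)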
1310 */ 1311static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, 1312 struct iwl_host_cmd *cmd) 1313{ 1314 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1315 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 1316 struct iwl_queue *q = &txq->q; 1317 struct iwl_device_cmd *out_cmd; 1318 struct iwl_cmd_meta *out_meta; 1319 unsigned long flags; 1320 void *dup_buf = NULL; 1321 dma_addr_t phys_addr; 1322 int idx; 1323 u16 copy_size, cmd_size, scratch_size; 1324 bool had_nocopy = false; 1325 int i, ret; 1326 u32 cmd_pos; 1327 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; 1328 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; 1329 1330 copy_size = sizeof(out_cmd->hdr); 1331 cmd_size = sizeof(out_cmd->hdr); 1332 1333 /* need one for the header if the first is NOCOPY */ 1334 BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); 1335 1336 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1337 cmddata[i] = cmd->data[i]; 1338 cmdlen[i] = cmd->len[i]; 1339 1340 if (!cmd->len[i]) 1341 continue; 1342 1343 /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ 1344 if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { 1345 int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; 1346 1347 if (copy > cmdlen[i]) 1348 copy = cmdlen[i]; 1349 cmdlen[i] -= copy; 1350 cmddata[i] += copy; 1351 copy_size += copy; 1352 } 1353 1354 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { 1355 had_nocopy = true; 1356 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { 1357 idx = -EINVAL; 1358 goto free_dup_buf; 1359 } 1360 } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { 1361 /* 1362 * This is also a chunk that isn't copied 1363 * to the static buffer so set had_nocopy. 1364 */ 1365 had_nocopy = true; 1366 1367 /* only allowed once */ 1368 if (WARN_ON(dup_buf)) { 1369 idx = -EINVAL; 1370 goto free_dup_buf; 1371 } 1372 1373 dup_buf = kmemdup(cmddata[i], cmdlen[i], 1374 GFP_ATOMIC); 1375 if (!dup_buf) 1376 return -ENOMEM; 1377 } else { 1378 /* NOCOPY must not be followed by normal! */ 1379 if (WARN_ON(had_nocopy)) { 1380 idx = -EINVAL; 1381 goto free_dup_buf; 1382 } 1383 copy_size += cmdlen[i]; 1384 } 1385 cmd_size += cmd->len[i]; 1386 } 1387 1388 /* 1389 * If any of the command structures end up being larger than 1390 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically 1391 * allocated into separate TFDs, then we will need to 1392 * increase the size of the buffers. 1393 */ 1394 if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, 1395 "Command %s (%#x) is too large (%d bytes)\n", 1396 get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) { 1397 idx = -EINVAL; 1398 goto free_dup_buf; 1399 } 1400 1401 spin_lock_bh(&txq->lock); 1402 1403 if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { 1404 spin_unlock_bh(&txq->lock); 1405 1406 IWL_ERR(trans, "No space in command queue\n"); 1407 iwl_op_mode_cmd_queue_full(trans->op_mode); 1408 idx = -ENOSPC; 1409 goto free_dup_buf; 1410 } 1411 1412 idx = get_cmd_index(q, q->write_ptr); 1413 out_cmd = txq->entries[idx].cmd; 1414 out_meta = &txq->entries[idx].meta; 1415 1416 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ 1417 if (cmd->flags & CMD_WANT_SKB) 1418 out_meta->source = cmd; 1419 1420 /* set up the header */ 1421 1422 out_cmd->hdr.cmd = cmd->id; 1423 out_cmd->hdr.flags = 0; 1424 out_cmd->hdr.sequence = 1425 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | 1426 INDEX_TO_SEQ(q->write_ptr)); 1427 1428 /* and copy the data that needs to be copied */ 1429 cmd_pos = offsetof(struct iwl_device_cmd, payload); 1430 copy_size = sizeof(out_cmd->hdr); 1431 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1432 int copy; 1433 1434 if (!cmd->len[i]) 1435 continue; 1436 1437 /* copy everything if not nocopy/dup */ 1438 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1439 IWL_HCMD_DFL_DUP))) { 1440 copy = cmd->len[i]; 1441 1442 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1443 cmd_pos += copy; 1444 copy_size += copy; 1445 continue; 1446 } 1447 1448 /* 1449 * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied 1450 * in total (for the scratchbuf handling), but copy up to what 1451 * we can fit into the payload for debug dump purposes. 1452 */ 1453 copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); 1454 1455 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1456 cmd_pos += copy; 1457 1458 /* However, treat copy_size the proper way, we need it below */ 1459 if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { 1460 copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; 1461 1462 if (copy > cmd->len[i]) 1463 copy = cmd->len[i]; 1464 copy_size += copy; 1465 } 1466 } 1467 1468 IWL_DEBUG_HC(trans, 1469 "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", 1470 get_cmd_string(trans_pcie, out_cmd->hdr.cmd), 1471 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), 1472 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); 1473 1474 /* start the TFD with the scratchbuf */ 1475 scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE); 1476 memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size); 1477 iwl_pcie_txq_build_tfd(trans, txq, 1478 iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr), 1479 scratch_size, true); 1480 1481 /* map first command fragment, if any remains */ 1482 if (copy_size > scratch_size) { 1483 phys_addr = dma_map_single(trans->dev, 1484 ((u8 *)&out_cmd->hdr) + scratch_size, 1485 copy_size - scratch_size, 1486 DMA_TO_DEVICE); 1487 if (dma_mapping_error(trans->dev, phys_addr)) { 1488 iwl_pcie_tfd_unmap(trans, out_meta, 1489 &txq->tfds[q->write_ptr]); 1490 idx = -ENOMEM; 1491 goto out; 1492 } 1493 1494 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, 1495 copy_size - scratch_size, false); 1496 } 1497 1498 /* map the remaining (adjusted) nocopy/dup fragments */ 1499 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1500 const void *data = cmddata[i]; 1501 1502 if (!cmdlen[i]) 1503 continue; 1504 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1505 IWL_HCMD_DFL_DUP))) 1506 continue; 1507 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) 1508 data = dup_buf; 1509 phys_addr = dma_map_single(trans->dev, (void *)data, 1510 cmdlen[i], DMA_TO_DEVICE); 1511 if (dma_mapping_error(trans->dev, phys_addr)) { 1512 iwl_pcie_tfd_unmap(trans, out_meta, 1513 &txq->tfds[q->write_ptr]); 1514 idx = 
                        -ENOMEM;
                        goto out;
                }

                iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
        }

        out_meta->flags = cmd->flags;
        if (WARN_ON_ONCE(txq->entries[idx].free_buf))
                kzfree(txq->entries[idx].free_buf);
        txq->entries[idx].free_buf = dup_buf;

        trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);

        /* start timer if queue currently empty */
        if (q->read_ptr == q->write_ptr && txq->wd_timeout)
                mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
        ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
        if (ret < 0) {
                idx = ret;
                spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
                goto out;
        }

        /* Increment and update queue's write index */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
        iwl_pcie_txq_inc_wr_ptr(trans, txq);

        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

 out:
        spin_unlock_bh(&txq->lock);
 free_dup_buf:
        if (idx < 0)
                kfree(dup_buf);
        return idx;
}

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *      (put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
                            struct iwl_rx_cmd_buffer *rxb, int handler_status)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
        int txq_id = SEQ_TO_QUEUE(sequence);
        int index = SEQ_TO_INDEX(sequence);
        int cmd_index;
        struct iwl_device_cmd *cmd;
        struct iwl_cmd_meta *meta;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

        /* If a Tx command is being handled and it isn't in the actual
         * command queue, then a command routing bug has been introduced
         * in the queue management code. */
        if (WARN(txq_id != trans_pcie->cmd_queue,
                 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
                 txq_id, trans_pcie->cmd_queue, sequence,
                 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
                 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
                iwl_print_hex_error(trans, pkt, 32);
                return;
        }

        spin_lock_bh(&txq->lock);

        cmd_index = get_cmd_index(&txq->q, index);
        cmd = txq->entries[cmd_index].cmd;
        meta = &txq->entries[cmd_index].meta;

        iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);

        /* Input error checking is done when commands are added to queue.
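         * For CMD_WANT_SKB commands the response page is stolen from the rxb
         * below and handed back to the waiting caller through meta->source.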
*/ 1598 if (meta->flags & CMD_WANT_SKB) { 1599 struct page *p = rxb_steal_page(rxb); 1600 1601 meta->source->resp_pkt = pkt; 1602 meta->source->_rx_page_addr = (unsigned long)page_address(p); 1603 meta->source->_rx_page_order = trans_pcie->rx_page_order; 1604 meta->source->handler_status = handler_status; 1605 } 1606 1607 iwl_pcie_cmdq_reclaim(trans, txq_id, index); 1608 1609 if (!(meta->flags & CMD_ASYNC)) { 1610 if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) { 1611 IWL_WARN(trans, 1612 "HCMD_ACTIVE already clear for command %s\n", 1613 get_cmd_string(trans_pcie, cmd->hdr.cmd)); 1614 } 1615 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1616 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 1617 get_cmd_string(trans_pcie, cmd->hdr.cmd)); 1618 wake_up(&trans_pcie->wait_command_queue); 1619 } 1620 1621 meta->flags = 0; 1622 1623 spin_unlock_bh(&txq->lock); 1624} 1625 1626#define HOST_COMPLETE_TIMEOUT (2 * HZ) 1627 1628static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans, 1629 struct iwl_host_cmd *cmd) 1630{ 1631 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1632 int ret; 1633 1634 /* An asynchronous command can not expect an SKB to be set. */ 1635 if (WARN_ON(cmd->flags & CMD_WANT_SKB)) 1636 return -EINVAL; 1637 1638 ret = iwl_pcie_enqueue_hcmd(trans, cmd); 1639 if (ret < 0) { 1640 IWL_ERR(trans, 1641 "Error sending %s: enqueue_hcmd failed: %d\n", 1642 get_cmd_string(trans_pcie, cmd->id), ret); 1643 return ret; 1644 } 1645 return 0; 1646} 1647 1648static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, 1649 struct iwl_host_cmd *cmd) 1650{ 1651 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1652 int cmd_idx; 1653 int ret; 1654 1655 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", 1656 get_cmd_string(trans_pcie, cmd->id)); 1657 1658 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, 1659 &trans->status), 1660 "Command %s: a command is already active!\n", 1661 get_cmd_string(trans_pcie, cmd->id))) 1662 return -EIO; 1663 1664 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", 1665 get_cmd_string(trans_pcie, cmd->id)); 1666 1667 cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); 1668 if (cmd_idx < 0) { 1669 ret = cmd_idx; 1670 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1671 IWL_ERR(trans, 1672 "Error sending %s: enqueue_hcmd failed: %d\n", 1673 get_cmd_string(trans_pcie, cmd->id), ret); 1674 return ret; 1675 } 1676 1677 ret = wait_event_timeout(trans_pcie->wait_command_queue, 1678 !test_bit(STATUS_SYNC_HCMD_ACTIVE, 1679 &trans->status), 1680 HOST_COMPLETE_TIMEOUT); 1681 if (!ret) { 1682 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 1683 struct iwl_queue *q = &txq->q; 1684 1685 IWL_ERR(trans, "Error sending %s: time out after %dms.\n", 1686 get_cmd_string(trans_pcie, cmd->id), 1687 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 1688 1689 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", 1690 q->read_ptr, q->write_ptr); 1691 1692 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1693 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 1694 get_cmd_string(trans_pcie, cmd->id)); 1695 ret = -ETIMEDOUT; 1696 1697 iwl_force_nmi(trans); 1698 iwl_trans_fw_error(trans); 1699 1700 goto cancel; 1701 } 1702 1703 if (test_bit(STATUS_FW_ERROR, &trans->status)) { 1704 IWL_ERR(trans, "FW error in SYNC CMD %s\n", 1705 get_cmd_string(trans_pcie, cmd->id)); 1706 dump_stack(); 1707 ret = -EIO; 1708 goto cancel; 1709 } 1710 1711 if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 
1712 test_bit(STATUS_RFKILL, &trans->status)) { 1713 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); 1714 ret = -ERFKILL; 1715 goto cancel; 1716 } 1717 1718 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { 1719 IWL_ERR(trans, "Error: Response NULL in '%s'\n", 1720 get_cmd_string(trans_pcie, cmd->id)); 1721 ret = -EIO; 1722 goto cancel; 1723 } 1724 1725 return 0; 1726 1727cancel: 1728 if (cmd->flags & CMD_WANT_SKB) { 1729 /* 1730 * Cancel the CMD_WANT_SKB flag for the cmd in the 1731 * TX cmd queue. Otherwise in case the cmd comes 1732 * in later, it will possibly set an invalid 1733 * address (cmd->meta.source). 1734 */ 1735 trans_pcie->txq[trans_pcie->cmd_queue]. 1736 entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; 1737 } 1738 1739 if (cmd->resp_pkt) { 1740 iwl_free_resp(cmd); 1741 cmd->resp_pkt = NULL; 1742 } 1743 1744 return ret; 1745} 1746 1747int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 1748{ 1749 if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 1750 test_bit(STATUS_RFKILL, &trans->status)) { 1751 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", 1752 cmd->id); 1753 return -ERFKILL; 1754 } 1755 1756 if (cmd->flags & CMD_ASYNC) 1757 return iwl_pcie_send_hcmd_async(trans, cmd); 1758 1759 /* We still can fail on RFKILL that can be asserted while we wait */ 1760 return iwl_pcie_send_hcmd_sync(trans, cmd); 1761} 1762 1763int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, 1764 struct iwl_device_cmd *dev_cmd, int txq_id) 1765{ 1766 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1767 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1768 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; 1769 struct iwl_cmd_meta *out_meta; 1770 struct iwl_txq *txq; 1771 struct iwl_queue *q; 1772 dma_addr_t tb0_phys, tb1_phys, scratch_phys; 1773 void *tb1_addr; 1774 u16 len, tb1_len, tb2_len; 1775 bool wait_write_ptr; 1776 __le16 fc = hdr->frame_control; 1777 u8 hdr_len = ieee80211_hdrlen(fc); 1778 u16 wifi_seq; 1779 1780 txq = &trans_pcie->txq[txq_id]; 1781 q = &txq->q; 1782 1783 if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), 1784 "TX on unused queue %d\n", txq_id)) 1785 return -EINVAL; 1786 1787 spin_lock(&txq->lock); 1788 1789 /* In AGG mode, the index in the ring must correspond to the WiFi 1790 * sequence number. This is a HW requirements to help the SCD to parse 1791 * the BA. 1792 * Check here that the packets are in the right place on the ring. 
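         * (The check below only compares the low byte of the sequence number
         * against the write pointer; e.g. on a 256-entry ring, a frame with
         * sequence number 0x123 is expected at TFD index 0x23.)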
1793 */ 1794 wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 1795 WARN_ONCE(txq->ampdu && 1796 (wifi_seq & 0xff) != q->write_ptr, 1797 "Q: %d WiFi Seq %d tfdNum %d", 1798 txq_id, wifi_seq, q->write_ptr); 1799 1800 /* Set up driver data for this TFD */ 1801 txq->entries[q->write_ptr].skb = skb; 1802 txq->entries[q->write_ptr].cmd = dev_cmd; 1803 1804 dev_cmd->hdr.sequence = 1805 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | 1806 INDEX_TO_SEQ(q->write_ptr))); 1807 1808 tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr); 1809 scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + 1810 offsetof(struct iwl_tx_cmd, scratch); 1811 1812 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); 1813 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); 1814 1815 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 1816 out_meta = &txq->entries[q->write_ptr].meta; 1817 1818 /* 1819 * The second TB (tb1) points to the remainder of the TX command 1820 * and the 802.11 header - dword aligned size 1821 * (This calculation modifies the TX command, so do it before the 1822 * setup of the first TB) 1823 */ 1824 len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + 1825 hdr_len - IWL_HCMD_SCRATCHBUF_SIZE; 1826 tb1_len = ALIGN(len, 4); 1827 1828 /* Tell NIC about any 2-byte padding after MAC header */ 1829 if (tb1_len != len) 1830 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; 1831 1832 /* The first TB points to the scratchbuf data - min_copy bytes */ 1833 memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr, 1834 IWL_HCMD_SCRATCHBUF_SIZE); 1835 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, 1836 IWL_HCMD_SCRATCHBUF_SIZE, true); 1837 1838 /* there must be data left over for TB1 or this code must be changed */ 1839 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE); 1840 1841 /* map the data for TB1 */ 1842 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE; 1843 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); 1844 if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) 1845 goto out_err; 1846 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); 1847 1848 /* 1849 * Set up TFD's third entry to point directly to remainder 1850 * of skb, if any (802.11 null frames have no payload). 
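         *
         * The resulting TFD layout for a data frame is thus:
         *   TB0 - first IWL_HCMD_SCRATCHBUF_SIZE bytes of the TX command,
         *         copied into the per-queue scratch buffer,
         *   TB1 - the rest of the TX command plus the 802.11 header,
         *   TB2 - the frame payload, if there is one.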
         */
        tb2_len = skb->len - hdr_len;
        if (tb2_len > 0) {
                dma_addr_t tb2_phys = dma_map_single(trans->dev,
                                                     skb->data + hdr_len,
                                                     tb2_len, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
                        iwl_pcie_tfd_unmap(trans, out_meta,
                                           &txq->tfds[q->write_ptr]);
                        goto out_err;
                }
                iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
        }

        /* Set up entry for this TFD in Tx byte-count array */
        iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

        trace_iwlwifi_dev_tx(trans->dev, skb,
                             &txq->tfds[txq->q.write_ptr],
                             sizeof(struct iwl_tfd),
                             &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
                             skb->data + hdr_len, tb2_len);
        trace_iwlwifi_dev_tx_data(trans->dev, skb,
                                  skb->data + hdr_len, tb2_len);

        wait_write_ptr = ieee80211_has_morefrags(fc);

        /* start timer if queue currently empty */
        if (q->read_ptr == q->write_ptr) {
                if (txq->wd_timeout)
                        mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
                IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
                iwl_trans_pcie_ref(trans);
        }

        /* Tell device the write index *just past* this latest filled TFD */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
        if (!wait_write_ptr)
                iwl_pcie_txq_inc_wr_ptr(trans, txq);

        /*
         * At this point the frame is "transmitted" successfully
         * and we will get a TX status notification eventually.
         */
        if (iwl_queue_space(q) < q->high_mark) {
                if (wait_write_ptr)
                        iwl_pcie_txq_inc_wr_ptr(trans, txq);
                else
                        iwl_stop_queue(trans, txq);
        }
        spin_unlock(&txq->lock);
        return 0;
out_err:
        spin_unlock(&txq->lock);
        return -1;
}