This source file includes the following definitions:
- iwl_rxq_space
- iwl_pcie_dma_addr2rbd_ptr
- iwl_pcie_rx_stop
- iwl_pcie_rxq_inc_wr_ptr
- iwl_pcie_rxq_check_wrptr
- iwl_pcie_restock_bd
- iwl_pcie_rxmq_restock
- iwl_pcie_rxsq_restock
- iwl_pcie_rxq_restock
- iwl_pcie_rx_alloc_page
- iwl_pcie_rxq_alloc_rbs
- iwl_pcie_free_rbs_pool
- iwl_pcie_rx_allocator
- iwl_pcie_rx_allocator_get
- iwl_pcie_rx_allocator_work
- iwl_pcie_free_bd_size
- iwl_pcie_free_rxq_dma
- iwl_pcie_alloc_rxq_dma
- iwl_pcie_rx_alloc
- iwl_pcie_rx_hw_init
- iwl_pcie_rx_mq_hw_init
- iwl_pcie_rx_init_rxb_lists
- iwl_pcie_dummy_napi_poll
- _iwl_pcie_rx_init
- iwl_pcie_rx_init
- iwl_pcie_gen2_rx_init
- iwl_pcie_rx_free
- iwl_pcie_rx_move_to_allocator
- iwl_pcie_rx_reuse_rbd
- iwl_pcie_rx_handle_rb
- iwl_pcie_get_rxb
- iwl_pcie_rx_handle
- iwl_pcie_get_trans_pcie
- iwl_pcie_irq_rx_msix_handler
- iwl_pcie_irq_handle_error
- iwl_pcie_int_cause_non_ict
- iwl_pcie_int_cause_ict
- iwl_pcie_handle_rfkill_irq
- iwl_pcie_irq_handler
- iwl_pcie_free_ict
- iwl_pcie_alloc_ict
- iwl_pcie_reset_ict
- iwl_pcie_disable_ict
- iwl_pcie_isr
- iwl_pcie_msix_isr
- iwl_pcie_irq_msix_handler
64 #include <linux/sched.h>
65 #include <linux/wait.h>
66 #include <linux/gfp.h>
67
68 #include "iwl-prph.h"
69 #include "iwl-io.h"
70 #include "internal.h"
71 #include "iwl-op-mode.h"
72 #include "iwl-context-info-gen3.h"
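/*
 * RX path overview (summarizing the code below):
 *
 * The driver keeps a pool of receive buffers (struct iwl_rx_mem_buffer,
 * "RBDs") backed by DMA-mapped pages.  Each RX queue owns a circular
 * buffer of descriptors that is handed to the hardware; the driver moves
 * RBDs between the queue's rx_free / rx_used lists and a background
 * allocator (struct iwl_rb_allocator) that replenishes pages in batches
 * of RX_CLAIM_REQ_ALLOC.  iwl_pcie_rxq_restock() writes free RBDs into
 * the ring and updates the write pointer; iwl_pcie_rx_handle() drains
 * received buffers, passes the packets to the op_mode and recycles the
 * RBDs.
 */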
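/*
 * iwl_rxq_space - return the number of free slots in the RX queue.
 * One slot is kept unused so that a full ring can be told apart from an
 * empty one; the mask arithmetic relies on queue_size being a power of
 * two (checked by the WARN_ON below).
 */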
176 static int iwl_rxq_space(const struct iwl_rxq *rxq)
177 {
178
179 WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
187 return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
188 }
189
190
191
192
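/*
 * iwl_pcie_dma_addr2rbd_ptr - convert a page DMA address into the
 * shifted (>> 8) 32-bit RBD pointer format used by the legacy RX ring.
 */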
193 static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
194 {
195 return cpu_to_le32((u32)(dma_addr >> 8));
196 }
197
198
199
200
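/*
 * iwl_pcie_rx_stop - stop RX DMA and poll for it to go idle, using the
 * register set that matches the device family (GEN3 RFH, RFH, or the
 * legacy FH).
 */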
201 int iwl_pcie_rx_stop(struct iwl_trans *trans)
202 {
203 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
204
205 iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
206 return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
207 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
208 } else if (trans->trans_cfg->mq_rx_supported) {
209 iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
210 return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
211 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
212 } else {
213 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
214 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
215 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
216 1000);
217 }
218 }
219
220
221
222
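/*
 * iwl_pcie_rxq_inc_wr_ptr - update the RX queue write pointer register.
 * If shadow registers are not enabled and the device may be asleep (PMI
 * set), request a wakeup via CSR_GP_CNTRL and defer the update
 * (rxq->need_update) until iwl_pcie_rxq_check_wrptr() runs again.
 */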
223 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
224 struct iwl_rxq *rxq)
225 {
226 u32 reg;
227
228 lockdep_assert_held(&rxq->lock);
235 if (!trans->trans_cfg->base_params->shadow_reg_enable &&
236 test_bit(STATUS_TPOWER_PMI, &trans->status)) {
237 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
238
239 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
240 IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
241 reg);
242 iwl_set_bit(trans, CSR_GP_CNTRL,
243 BIT(trans->trans_cfg->csr->flag_mac_access_req));
244 rxq->need_update = true;
245 return;
246 }
247 }
248
249 rxq->write_actual = round_down(rxq->write, 8);
250 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22560)
251 iwl_write32(trans, HBUS_TARG_WRPTR,
252 (rxq->write_actual |
253 ((FIRST_RX_QUEUE + rxq->id) << 16)));
254 else if (trans->trans_cfg->mq_rx_supported)
255 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
256 rxq->write_actual);
257 else
258 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
259 }
260
261 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
262 {
263 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
264 int i;
265
266 for (i = 0; i < trans->num_rx_queues; i++) {
267 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
268
269 if (!rxq->need_update)
270 continue;
271 spin_lock(&rxq->lock);
272 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
273 rxq->need_update = false;
274 spin_unlock(&rxq->lock);
275 }
276 }
277
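/*
 * iwl_pcie_restock_bd - write one free RBD into the multi-queue RX ring
 * at the current write index: a transfer descriptor (address + buffer id)
 * on 22560+ devices, or a single 64-bit word encoding both otherwise.
 */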
278 static void iwl_pcie_restock_bd(struct iwl_trans *trans,
279 struct iwl_rxq *rxq,
280 struct iwl_rx_mem_buffer *rxb)
281 {
282 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
283 struct iwl_rx_transfer_desc *bd = rxq->bd;
284
285 BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
286
287 bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
288 bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
289 } else {
290 __le64 *bd = rxq->bd;
291
292 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
293 }
294
295 IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
296 (u32)rxb->vid, rxq->id, rxq->write);
297 }
298
299
300
301
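/*
 * iwl_pcie_rxmq_restock - restock the multi-queue RX ring from rx_free.
 * Bails out early if the device is not enabled (e.g. while stopping),
 * and only tells the hardware about new buffers once the write index
 * has advanced past the last value written (batches of 8).
 */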
302 static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
303 struct iwl_rxq *rxq)
304 {
305 struct iwl_rx_mem_buffer *rxb;
315 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
316 return;
317
318 spin_lock(&rxq->lock);
319 while (rxq->free_count) {
320
321 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
322 list);
323 list_del(&rxb->list);
324 rxb->invalid = false;
325
326 WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
327
328 iwl_pcie_restock_bd(trans, rxq, rxb);
329 rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
330 rxq->free_count--;
331 }
332 spin_unlock(&rxq->lock);
333
334
335
336
337
338 if (rxq->write_actual != (rxq->write & ~0x7)) {
339 spin_lock(&rxq->lock);
340 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
341 spin_unlock(&rxq->lock);
342 }
343 }
344
345
346
347
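/*
 * iwl_pcie_rxsq_restock - restock the legacy single-queue RX ring: move
 * buffers from rx_free into the descriptor ring while there is space,
 * then update the write pointer.  Like the MQ variant, this is a no-op
 * while the device is not enabled.
 */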
348 static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
349 struct iwl_rxq *rxq)
350 {
351 struct iwl_rx_mem_buffer *rxb;
361 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
362 return;
363
364 spin_lock(&rxq->lock);
365 while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
366 __le32 *bd = (__le32 *)rxq->bd;
367
368 rxb = rxq->queue[rxq->write];
369 BUG_ON(rxb && rxb->page);
370
371
372 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
373 list);
374 list_del(&rxb->list);
375 rxb->invalid = false;
376
377
378 bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
379 rxq->queue[rxq->write] = rxb;
380 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
381 rxq->free_count--;
382 }
383 spin_unlock(&rxq->lock);
384
385
386
387 if (rxq->write_actual != (rxq->write & ~0x7)) {
388 spin_lock(&rxq->lock);
389 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
390 spin_unlock(&rxq->lock);
391 }
392 }
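/*
 * iwl_pcie_rxq_restock - restock the RX ring, dispatching to the
 * multi-queue or single-queue implementation above.
 */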
405 static
406 void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
407 {
408 if (trans->trans_cfg->mq_rx_supported)
409 iwl_pcie_rxmq_restock(trans, rxq);
410 else
411 iwl_pcie_rxsq_restock(trans, rxq);
412 }
413
414
415
416
417
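/*
 * iwl_pcie_rx_alloc_page - allocate a (possibly multi-order) page for an
 * RX buffer; allocation failures are only logged (rate-limited) and left
 * to the caller to handle.
 */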
418 static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
419 gfp_t priority)
420 {
421 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
422 struct page *page;
423 gfp_t gfp_mask = priority;
424
425 if (trans_pcie->rx_page_order > 0)
426 gfp_mask |= __GFP_COMP;
427
428
429 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
430 if (!page) {
431 if (net_ratelimit())
432 IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
433 trans_pcie->rx_page_order);
434
435
436
437
438 if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
439 IWL_CRIT(trans,
440 "Failed to alloc_pages\n");
441 return NULL;
442 }
443 return page;
444 }
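/*
 * iwl_pcie_rxq_alloc_rbs - allocate and DMA-map pages for every RBD on
 * the queue's rx_used list and move them to rx_free.  Called from the
 * init path with GFP_KERNEL and from the RX path with GFP_ATOMIC.
 */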
455 void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
456 struct iwl_rxq *rxq)
457 {
458 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
459 struct iwl_rx_mem_buffer *rxb;
460 struct page *page;
461
462 while (1) {
463 spin_lock(&rxq->lock);
464 if (list_empty(&rxq->rx_used)) {
465 spin_unlock(&rxq->lock);
466 return;
467 }
468 spin_unlock(&rxq->lock);
469
470
471 page = iwl_pcie_rx_alloc_page(trans, priority);
472 if (!page)
473 return;
474
475 spin_lock(&rxq->lock);
476
477 if (list_empty(&rxq->rx_used)) {
478 spin_unlock(&rxq->lock);
479 __free_pages(page, trans_pcie->rx_page_order);
480 return;
481 }
482 rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
483 list);
484 list_del(&rxb->list);
485 spin_unlock(&rxq->lock);
486
487 BUG_ON(rxb->page);
488 rxb->page = page;
489
490 rxb->page_dma =
491 dma_map_page(trans->dev, page, 0,
492 PAGE_SIZE << trans_pcie->rx_page_order,
493 DMA_FROM_DEVICE);
494 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
495 rxb->page = NULL;
496 spin_lock(&rxq->lock);
497 list_add(&rxb->list, &rxq->rx_used);
498 spin_unlock(&rxq->lock);
499 __free_pages(page, trans_pcie->rx_page_order);
500 return;
501 }
502
503 spin_lock(&rxq->lock);
504
505 list_add_tail(&rxb->list, &rxq->rx_free);
506 rxq->free_count++;
507
508 spin_unlock(&rxq->lock);
509 }
510 }
511
512 void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
513 {
514 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
515 int i;
516
517 for (i = 0; i < RX_POOL_SIZE; i++) {
518 if (!trans_pcie->rx_pool[i].page)
519 continue;
520 dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
521 PAGE_SIZE << trans_pcie->rx_page_order,
522 DMA_FROM_DEVICE);
523 __free_pages(trans_pcie->rx_pool[i].page,
524 trans_pcie->rx_page_order);
525 trans_pcie->rx_pool[i].page = NULL;
526 }
527 }
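/*
 * iwl_pcie_rx_allocator - background worker that allocates pages in
 * batches of RX_CLAIM_REQ_ALLOC for the RBDs queued on rbd_empty, moves
 * the filled RBDs to rbd_allocated and signals each completed batch via
 * req_ready.
 */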
535 static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
536 {
537 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
538 struct iwl_rb_allocator *rba = &trans_pcie->rba;
539 struct list_head local_empty;
540 int pending = atomic_read(&rba->req_pending);
541
542 IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);
543
544
545 spin_lock(&rba->lock);
546
547 list_replace_init(&rba->rbd_empty, &local_empty);
548 spin_unlock(&rba->lock);
549
550 while (pending) {
551 int i;
552 LIST_HEAD(local_allocated);
553 gfp_t gfp_mask = GFP_KERNEL;
554
555
556 if (pending < RX_PENDING_WATERMARK)
557 gfp_mask |= __GFP_NOWARN;
558
559 for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
560 struct iwl_rx_mem_buffer *rxb;
561 struct page *page;
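/*
 * The list should never be empty here: every reused RBD is returned to
 * rbd_empty, and the initial pool covers the gap between what the queues
 * hold and what the allocator is asked to fill.
 */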
568 BUG_ON(list_empty(&local_empty));
569
570 rxb = list_first_entry(&local_empty,
571 struct iwl_rx_mem_buffer, list);
572 BUG_ON(rxb->page);
573
574
575 page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
576 if (!page)
577 continue;
578 rxb->page = page;
579
580
581 rxb->page_dma = dma_map_page(trans->dev, page, 0,
582 PAGE_SIZE << trans_pcie->rx_page_order,
583 DMA_FROM_DEVICE);
584 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
585 rxb->page = NULL;
586 __free_pages(page, trans_pcie->rx_page_order);
587 continue;
588 }
589
590
591 list_move(&rxb->list, &local_allocated);
592 i++;
593 }
594
595 atomic_dec(&rba->req_pending);
596 pending--;
597
598 if (!pending) {
599 pending = atomic_read(&rba->req_pending);
600 if (pending)
601 IWL_DEBUG_TPT(trans,
602 "Got more pending allocation requests = %d\n",
603 pending);
604 }
605
606 spin_lock(&rba->lock);
607
608 list_splice_tail(&local_allocated, &rba->rbd_allocated);
609
610 list_splice_tail_init(&rba->rbd_empty, &local_empty);
611 spin_unlock(&rba->lock);
612
613 atomic_inc(&rba->req_ready);
614
615 }
616
617 spin_lock(&rba->lock);
618
619 list_splice_tail(&local_empty, &rba->rbd_empty);
620 spin_unlock(&rba->lock);
621
622 IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
623 }
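/*
 * iwl_pcie_rx_allocator_get - claim one completed batch from the
 * allocator: move RX_CLAIM_REQ_ALLOC ready RBDs from rbd_allocated to
 * the queue's rx_free list and adjust the used/free counters.
 */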
633 static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
634 struct iwl_rxq *rxq)
635 {
636 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
637 struct iwl_rb_allocator *rba = &trans_pcie->rba;
638 int i;
639
640 lockdep_assert_held(&rxq->lock);
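/*
 * If no allocation batch has completed yet (req_ready == 0),
 * atomic_dec_if_positive() returns -1 without touching the counter and
 * we simply return; otherwise one completed request is consumed here.
 */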
650 if (atomic_dec_if_positive(&rba->req_ready) < 0)
651 return;
652
653 spin_lock(&rba->lock);
654 for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
655
656 struct iwl_rx_mem_buffer *rxb =
657 list_first_entry(&rba->rbd_allocated,
658 struct iwl_rx_mem_buffer, list);
659
660 list_move(&rxb->list, &rxq->rx_free);
661 }
662 spin_unlock(&rba->lock);
663
664 rxq->used_count -= RX_CLAIM_REQ_ALLOC;
665 rxq->free_count += RX_CLAIM_REQ_ALLOC;
666 }
667
668 void iwl_pcie_rx_allocator_work(struct work_struct *data)
669 {
670 struct iwl_rb_allocator *rba_p =
671 container_of(data, struct iwl_rb_allocator, rx_alloc);
672 struct iwl_trans_pcie *trans_pcie =
673 container_of(rba_p, struct iwl_trans_pcie, rba);
674
675 iwl_pcie_rx_allocator(trans_pcie->trans);
676 }
677
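/*
 * iwl_pcie_free_bd_size - size of one entry in the free-RBD ring: a full
 * transfer descriptor when RX TDs are used (22560+), a 64-bit address on
 * other MQ devices, or a 32-bit RBD pointer on legacy hardware.
 */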
678 static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
679 {
680 struct iwl_rx_transfer_desc *rx_td;
681
682 if (use_rx_td)
683 return sizeof(*rx_td);
684 else
685 return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
686 sizeof(__le32);
687 }
688
689 static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
690 struct iwl_rxq *rxq)
691 {
692 struct device *dev = trans->dev;
693 bool use_rx_td = (trans->trans_cfg->device_family >=
694 IWL_DEVICE_FAMILY_22560);
695 int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
696
697 if (rxq->bd)
698 dma_free_coherent(trans->dev,
699 free_size * rxq->queue_size,
700 rxq->bd, rxq->bd_dma);
701 rxq->bd_dma = 0;
702 rxq->bd = NULL;
703
704 rxq->rb_stts_dma = 0;
705 rxq->rb_stts = NULL;
706
707 if (rxq->used_bd)
708 dma_free_coherent(trans->dev,
709 (use_rx_td ? sizeof(*rxq->cd) :
710 sizeof(__le32)) * rxq->queue_size,
711 rxq->used_bd, rxq->used_bd_dma);
712 rxq->used_bd_dma = 0;
713 rxq->used_bd = NULL;
714
715 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22560)
716 return;
717
718 if (rxq->tr_tail)
719 dma_free_coherent(dev, sizeof(__le16),
720 rxq->tr_tail, rxq->tr_tail_dma);
721 rxq->tr_tail_dma = 0;
722 rxq->tr_tail = NULL;
723
724 if (rxq->cr_tail)
725 dma_free_coherent(dev, sizeof(__le16),
726 rxq->cr_tail, rxq->cr_tail_dma);
727 rxq->cr_tail_dma = 0;
728 rxq->cr_tail = NULL;
729 }
730
731 static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
732 struct iwl_rxq *rxq)
733 {
734 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
735 struct device *dev = trans->dev;
736 int i;
737 int free_size;
738 bool use_rx_td = (trans->trans_cfg->device_family >=
739 IWL_DEVICE_FAMILY_22560);
740 size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
741 sizeof(struct iwl_rb_status);
742
743 spin_lock_init(&rxq->lock);
744 if (trans->trans_cfg->mq_rx_supported)
745 rxq->queue_size = MQ_RX_TABLE_SIZE;
746 else
747 rxq->queue_size = RX_QUEUE_SIZE;
748
749 free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
750
751
752
753
754
755 rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
756 &rxq->bd_dma, GFP_KERNEL);
757 if (!rxq->bd)
758 goto err;
759
760 if (trans->trans_cfg->mq_rx_supported) {
761 rxq->used_bd = dma_alloc_coherent(dev,
762 (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
763 &rxq->used_bd_dma,
764 GFP_KERNEL);
765 if (!rxq->used_bd)
766 goto err;
767 }
768
769 rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
770 rxq->rb_stts_dma =
771 trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
772
773 if (!use_rx_td)
774 return 0;
775
776
777 rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
778 &rxq->tr_tail_dma, GFP_KERNEL);
779 if (!rxq->tr_tail)
780 goto err;
781
782
783 rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
784 &rxq->cr_tail_dma, GFP_KERNEL);
785 if (!rxq->cr_tail)
786 goto err;
787
788
789
790
791 *rxq->cr_tail = cpu_to_le16(500);
792
793 return 0;
794
795 err:
796 for (i = 0; i < trans->num_rx_queues; i++) {
797 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
798
799 iwl_pcie_free_rxq_dma(trans, rxq);
800 }
801
802 return -ENOMEM;
803 }
804
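/*
 * iwl_pcie_rx_alloc - allocate the per-queue rxq structures, the shared
 * block of RB status buffers and each queue's DMA rings.
 */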
805 int iwl_pcie_rx_alloc(struct iwl_trans *trans)
806 {
807 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
808 struct iwl_rb_allocator *rba = &trans_pcie->rba;
809 int i, ret;
810 size_t rb_stts_size = trans->trans_cfg->device_family >=
811 IWL_DEVICE_FAMILY_22560 ?
812 sizeof(__le16) : sizeof(struct iwl_rb_status);
813
814 if (WARN_ON(trans_pcie->rxq))
815 return -EINVAL;
816
817 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
818 GFP_KERNEL);
819 if (!trans_pcie->rxq)
820 return -ENOMEM;
821
822 spin_lock_init(&rba->lock);
823
824
825
826
827
828 trans_pcie->base_rb_stts =
829 dma_alloc_coherent(trans->dev,
830 rb_stts_size * trans->num_rx_queues,
831 &trans_pcie->base_rb_stts_dma,
832 GFP_KERNEL);
833 if (!trans_pcie->base_rb_stts) {
834 ret = -ENOMEM;
835 goto err;
836 }
837
838 for (i = 0; i < trans->num_rx_queues; i++) {
839 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
840
841 rxq->id = i;
842 ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
843 if (ret)
844 goto err;
845 }
846 return 0;
847
848 err:
849 if (trans_pcie->base_rb_stts) {
850 dma_free_coherent(trans->dev,
851 rb_stts_size * trans->num_rx_queues,
852 trans_pcie->base_rb_stts,
853 trans_pcie->base_rb_stts_dma);
854 trans_pcie->base_rb_stts = NULL;
855 trans_pcie->base_rb_stts_dma = 0;
856 }
857 kfree(trans_pcie->rxq);
858
859 return ret;
860 }
861
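/*
 * iwl_pcie_rx_hw_init - program the legacy FH RX DMA: reset the channel,
 * point it at the RBD ring and status buffer, then enable it with the
 * configured RB size, interrupt threshold and coalescing timer.
 */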
862 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
863 {
864 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
865 u32 rb_size;
866 unsigned long flags;
867 const u32 rfdnlog = RX_QUEUE_SIZE_LOG;
868
869 switch (trans_pcie->rx_buf_size) {
870 case IWL_AMSDU_4K:
871 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
872 break;
873 case IWL_AMSDU_8K:
874 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
875 break;
876 case IWL_AMSDU_12K:
877 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
878 break;
879 default:
880 WARN_ON(1);
881 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
882 }
883
884 if (!iwl_trans_grab_nic_access(trans, &flags))
885 return;
886
887
888 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
889
890 iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
891 iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
892 iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
893
894
895 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
896
897
898 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
899 (u32)(rxq->bd_dma >> 8));
900
901
902 iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
903 rxq->rb_stts_dma >> 4);
904
905
906
907
908
909
910
911
912
913 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
914 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
915 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
916 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
917 rb_size |
918 (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
919 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
920
921 iwl_trans_release_nic_access(trans, &flags);
922
923
924 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
925
926
927 if (trans->cfg->host_interrupt_operation_mode)
928 iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
929 }
930
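/*
 * iwl_pcie_rx_mq_hw_init - program the RFH for multi-queue RX: disable
 * DMA, set each queue's ring addresses and indices, then re-enable DMA
 * with the configured RB size and activate all queues.
 */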
931 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
932 {
933 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
934 u32 rb_size, enabled = 0;
935 unsigned long flags;
936 int i;
937
938 switch (trans_pcie->rx_buf_size) {
939 case IWL_AMSDU_2K:
940 rb_size = RFH_RXF_DMA_RB_SIZE_2K;
941 break;
942 case IWL_AMSDU_4K:
943 rb_size = RFH_RXF_DMA_RB_SIZE_4K;
944 break;
945 case IWL_AMSDU_8K:
946 rb_size = RFH_RXF_DMA_RB_SIZE_8K;
947 break;
948 case IWL_AMSDU_12K:
949 rb_size = RFH_RXF_DMA_RB_SIZE_12K;
950 break;
951 default:
952 WARN_ON(1);
953 rb_size = RFH_RXF_DMA_RB_SIZE_4K;
954 }
955
956 if (!iwl_trans_grab_nic_access(trans, &flags))
957 return;
958
959
960 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
961
962 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
963
964 for (i = 0; i < trans->num_rx_queues; i++) {
965
966 iwl_write_prph64_no_grab(trans,
967 RFH_Q_FRBDCB_BA_LSB(i),
968 trans_pcie->rxq[i].bd_dma);
969
970 iwl_write_prph64_no_grab(trans,
971 RFH_Q_URBDCB_BA_LSB(i),
972 trans_pcie->rxq[i].used_bd_dma);
973
974 iwl_write_prph64_no_grab(trans,
975 RFH_Q_URBD_STTS_WPTR_LSB(i),
976 trans_pcie->rxq[i].rb_stts_dma);
977
978 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
979 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
980 iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
981
982 enabled |= BIT(i) | BIT(i + 16);
983 }
984
985
986
987
988
989
990
991
992 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
993 RFH_DMA_EN_ENABLE_VAL | rb_size |
994 RFH_RXF_DMA_MIN_RB_4_8 |
995 RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
996 RFH_RXF_DMA_RBDCB_SIZE_512);
997
998
999
1000
1001
1002
1003 iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
1004 RFH_GEN_CFG_RFH_DMA_SNOOP |
1005 RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
1006 RFH_GEN_CFG_SERVICE_DMA_SNOOP |
1007 RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
1008 trans->cfg->integrated ?
1009 RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
1010 RFH_GEN_CFG_RB_CHUNK_SIZE_128));
1011
1012 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
1013
1014 iwl_trans_release_nic_access(trans, &flags);
1015
1016
1017 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
1018 }
1019
1020 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
1021 {
1022 lockdep_assert_held(&rxq->lock);
1023
1024 INIT_LIST_HEAD(&rxq->rx_free);
1025 INIT_LIST_HEAD(&rxq->rx_used);
1026 rxq->free_count = 0;
1027 rxq->used_count = 0;
1028 }
1029
1030 int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
1031 {
1032 WARN_ON(1);
1033 return 0;
1034 }
1035
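/*
 * _iwl_pcie_rx_init - (re)initialize the RX state common to all device
 * families: reset the allocator, free any leftover pages, reset every
 * queue's indices and lists, set up NAPI, seed the RBD pool and global
 * table, and pre-allocate buffers for the default queue.
 */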
1036 int _iwl_pcie_rx_init(struct iwl_trans *trans)
1037 {
1038 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1039 struct iwl_rxq *def_rxq;
1040 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1041 int i, err, queue_size, allocator_pool_size, num_alloc;
1042
1043 if (!trans_pcie->rxq) {
1044 err = iwl_pcie_rx_alloc(trans);
1045 if (err)
1046 return err;
1047 }
1048 def_rxq = trans_pcie->rxq;
1049
1050 cancel_work_sync(&rba->rx_alloc);
1051
1052 spin_lock(&rba->lock);
1053 atomic_set(&rba->req_pending, 0);
1054 atomic_set(&rba->req_ready, 0);
1055 INIT_LIST_HEAD(&rba->rbd_allocated);
1056 INIT_LIST_HEAD(&rba->rbd_empty);
1057 spin_unlock(&rba->lock);
1058
1059
1060 iwl_pcie_free_rbs_pool(trans);
1061
1062 for (i = 0; i < RX_QUEUE_SIZE; i++)
1063 def_rxq->queue[i] = NULL;
1064
1065 for (i = 0; i < trans->num_rx_queues; i++) {
1066 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1067
1068 spin_lock(&rxq->lock);
1069
1070
1071
1072
1073
1074 rxq->read = 0;
1075 rxq->write = 0;
1076 rxq->write_actual = 0;
1077 memset(rxq->rb_stts, 0, (trans->trans_cfg->device_family >=
1078 IWL_DEVICE_FAMILY_22560) ?
1079 sizeof(__le16) : sizeof(struct iwl_rb_status));
1080
1081 iwl_pcie_rx_init_rxb_lists(rxq);
1082
1083 if (!rxq->napi.poll)
1084 netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
1085 iwl_pcie_dummy_napi_poll, 64);
1086
1087 spin_unlock(&rxq->lock);
1088 }
1089
1090
1091 queue_size = trans->trans_cfg->mq_rx_supported ?
1092 MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
1093 allocator_pool_size = trans->num_rx_queues *
1094 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
1095 num_alloc = queue_size + allocator_pool_size;
1096 BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
1097 ARRAY_SIZE(trans_pcie->rx_pool));
1098 for (i = 0; i < num_alloc; i++) {
1099 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
1100
1101 if (i < allocator_pool_size)
1102 list_add(&rxb->list, &rba->rbd_empty);
1103 else
1104 list_add(&rxb->list, &def_rxq->rx_used);
1105 trans_pcie->global_table[i] = rxb;
1106 rxb->vid = (u16)(i + 1);
1107 rxb->invalid = true;
1108 }
1109
1110 iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
1111
1112 return 0;
1113 }
1114
1115 int iwl_pcie_rx_init(struct iwl_trans *trans)
1116 {
1117 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1118 int ret = _iwl_pcie_rx_init(trans);
1119
1120 if (ret)
1121 return ret;
1122
1123 if (trans->trans_cfg->mq_rx_supported)
1124 iwl_pcie_rx_mq_hw_init(trans);
1125 else
1126 iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
1127
1128 iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
1129
1130 spin_lock(&trans_pcie->rxq->lock);
1131 iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
1132 spin_unlock(&trans_pcie->rxq->lock);
1133
1134 return 0;
1135 }
1136
1137 int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
1138 {
1139
1140 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
1141
1142
1143
1144
1145
1146 return _iwl_pcie_rx_init(trans);
1147 }
1148
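/*
 * iwl_pcie_rx_free - undo iwl_pcie_rx_alloc()/_iwl_pcie_rx_init(): stop
 * the allocator work, free all RX pages, the status buffers, the
 * per-queue DMA rings and the NAPI contexts.
 */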
1149 void iwl_pcie_rx_free(struct iwl_trans *trans)
1150 {
1151 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1152 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1153 int i;
1154 size_t rb_stts_size = trans->trans_cfg->device_family >=
1155 IWL_DEVICE_FAMILY_22560 ?
1156 sizeof(__le16) : sizeof(struct iwl_rb_status);
1157
1158
1159
1160
1161
1162 if (!trans_pcie->rxq) {
1163 IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
1164 return;
1165 }
1166
1167 cancel_work_sync(&rba->rx_alloc);
1168
1169 iwl_pcie_free_rbs_pool(trans);
1170
1171 if (trans_pcie->base_rb_stts) {
1172 dma_free_coherent(trans->dev,
1173 rb_stts_size * trans->num_rx_queues,
1174 trans_pcie->base_rb_stts,
1175 trans_pcie->base_rb_stts_dma);
1176 trans_pcie->base_rb_stts = NULL;
1177 trans_pcie->base_rb_stts_dma = 0;
1178 }
1179
1180 for (i = 0; i < trans->num_rx_queues; i++) {
1181 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1182
1183 iwl_pcie_free_rxq_dma(trans, rxq);
1184
1185 if (rxq->napi.poll)
1186 netif_napi_del(&rxq->napi);
1187 }
1188 kfree(trans_pcie->rxq);
1189 }
1190
1191 static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
1192 struct iwl_rb_allocator *rba)
1193 {
1194 spin_lock(&rba->lock);
1195 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1196 spin_unlock(&rba->lock);
1197 }
1198
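/*
 * iwl_pcie_rx_reuse_rbd - return a drained RBD to the queue's rx_used
 * list.  Outside of emergency mode the used counter is tracked, and each
 * time another RX_CLAIM_REQ_ALLOC buffers have been used (offset by
 * RX_POST_REQ_ALLOC) they are handed to the allocator and an allocation
 * request is posted.
 */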
1205 static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1206 struct iwl_rx_mem_buffer *rxb,
1207 struct iwl_rxq *rxq, bool emergency)
1208 {
1209 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1210 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1211
1212
1213
1214 list_add_tail(&rxb->list, &rxq->rx_used);
1215
1216 if (unlikely(emergency))
1217 return;
1218
1219
1220 rxq->used_count++;
1221
1222
1223
1224
1225
1226
1227 if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
1228
1229
1230 iwl_pcie_rx_move_to_allocator(rxq, rba);
1231
1232 atomic_inc(&rba->req_pending);
1233 queue_work(rba->alloc_wq, &rba->rx_alloc);
1234 }
1235 }
1236
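/*
 * iwl_pcie_rx_handle_rb - process every packet packed into one receive
 * buffer: hand each to the op_mode (default or RSS queue), complete host
 * commands that the packet answers, then either re-map the page for
 * reuse or recycle the RBD if the page was stolen or mapping failed.
 */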
1237 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
1238 struct iwl_rxq *rxq,
1239 struct iwl_rx_mem_buffer *rxb,
1240 bool emergency,
1241 int i)
1242 {
1243 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1244 struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
1245 bool page_stolen = false;
1246 int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
1247 u32 offset = 0;
1248
1249 if (WARN_ON(!rxb))
1250 return;
1251
1252 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1253
1254 while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1255 struct iwl_rx_packet *pkt;
1256 u16 sequence;
1257 bool reclaim;
1258 int index, cmd_index, len;
1259 struct iwl_rx_cmd_buffer rxcb = {
1260 ._offset = offset,
1261 ._rx_page_order = trans_pcie->rx_page_order,
1262 ._page = rxb->page,
1263 ._page_stolen = false,
1264 .truesize = max_len,
1265 };
1266
1267 pkt = rxb_addr(&rxcb);
1268
1269 if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
1270 IWL_DEBUG_RX(trans,
1271 "Q %d: RB end marker at offset %d\n",
1272 rxq->id, offset);
1273 break;
1274 }
1275
1276 WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1277 FH_RSCSR_RXQ_POS != rxq->id,
1278 "frame on invalid queue - is on %d and indicates %d\n",
1279 rxq->id,
1280 (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1281 FH_RSCSR_RXQ_POS);
1282
1283 IWL_DEBUG_RX(trans,
1284 "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
1285 rxq->id, offset,
1286 iwl_get_cmd_string(trans,
1287 iwl_cmd_id(pkt->hdr.cmd,
1288 pkt->hdr.group_id,
1289 0)),
1290 pkt->hdr.group_id, pkt->hdr.cmd,
1291 le16_to_cpu(pkt->hdr.sequence));
1292
1293 len = iwl_rx_packet_len(pkt);
1294 len += sizeof(u32);
1295 trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
1296 trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
1297
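/*
 * Only responses to driver-originated commands reclaim a command queue
 * entry: frames generated by the device carry SEQ_RX_FRAME, and commands
 * listed in no_reclaim_cmds (group 0) are never reclaimed here.
 */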
1304 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1305 if (reclaim && !pkt->hdr.group_id) {
1306 int i;
1307
1308 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1309 if (trans_pcie->no_reclaim_cmds[i] ==
1310 pkt->hdr.cmd) {
1311 reclaim = false;
1312 break;
1313 }
1314 }
1315 }
1316
1317 sequence = le16_to_cpu(pkt->hdr.sequence);
1318 index = SEQ_TO_INDEX(sequence);
1319 cmd_index = iwl_pcie_get_cmd_index(txq, index);
1320
1321 if (rxq->id == trans_pcie->def_rx_queue)
1322 iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1323 &rxcb);
1324 else
1325 iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1326 &rxcb, rxq->id);
1327
1328 if (reclaim) {
1329 kzfree(txq->entries[cmd_index].free_buf);
1330 txq->entries[cmd_index].free_buf = NULL;
1331 }
1332
1333
1334
1335
1336
1337
1338 if (reclaim) {
1339
1340
1341
1342
1343 if (!rxcb._page_stolen)
1344 iwl_pcie_hcmd_complete(trans, &rxcb);
1345 else
1346 IWL_WARN(trans, "Claim null rxb?\n");
1347 }
1348
1349 page_stolen |= rxcb._page_stolen;
1350 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1351 break;
1352 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
1353 }
1354
1355
1356 if (page_stolen) {
1357 __free_pages(rxb->page, trans_pcie->rx_page_order);
1358 rxb->page = NULL;
1359 }
1360
1361
1362
1363
1364 if (rxb->page != NULL) {
1365 rxb->page_dma =
1366 dma_map_page(trans->dev, rxb->page, 0,
1367 PAGE_SIZE << trans_pcie->rx_page_order,
1368 DMA_FROM_DEVICE);
1369 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1370
1371
1372
1373
1374
1375 __free_pages(rxb->page, trans_pcie->rx_page_order);
1376 rxb->page = NULL;
1377 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1378 } else {
1379 list_add_tail(&rxb->list, &rxq->rx_free);
1380 rxq->free_count++;
1381 }
1382 } else
1383 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1384 }
1385
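/*
 * iwl_pcie_get_rxb - look up the RBD the hardware used for slot i: taken
 * directly from the shadow queue on legacy devices, or resolved through
 * the virtual buffer id (vid) read from the used-BD/completion ring on
 * MQ devices.  An out-of-range or stale vid triggers an NMI.
 */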
1386 static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
1387 struct iwl_rxq *rxq, int i)
1388 {
1389 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1390 struct iwl_rx_mem_buffer *rxb;
1391 u16 vid;
1392
1393 BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
1394
1395 if (!trans->trans_cfg->mq_rx_supported) {
1396 rxb = rxq->queue[i];
1397 rxq->queue[i] = NULL;
1398 return rxb;
1399 }
1400
1401
1402 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1403 vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
1404 else
1405 vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
1406
1407 if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
1408 goto out_err;
1409
1410 rxb = trans_pcie->global_table[vid - 1];
1411 if (rxb->invalid)
1412 goto out_err;
1413
1414 IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
1415
1416 rxb->invalid = true;
1417
1418 return rxb;
1419
1420 out_err:
1421 WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
1422 iwl_force_nmi(trans);
1423 return NULL;
1424 }
1425
1426
1427
1428
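/*
 * iwl_pcie_rx_handle - main RX processing loop for one queue: walk the
 * ring from the driver's read index to the hardware's closed index,
 * handle each buffer, manage the allocator (including an emergency mode
 * when too many allocations are pending), flush NAPI and restock.
 */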
1429 static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
1430 {
1431 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1432 struct napi_struct *napi;
1433 struct iwl_rxq *rxq;
1434 u32 r, i, count = 0;
1435 bool emergency = false;
1436
1437 if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
1438 return;
1439
1440 rxq = &trans_pcie->rxq[queue];
1441
1442 restart:
1443 spin_lock(&rxq->lock);
1444
1445
1446 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
1447 i = rxq->read;
1448
1449
1450 r &= (rxq->queue_size - 1);
1451
1452
1453 if (i == r)
1454 IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1455
1456 while (i != r) {
1457 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1458 struct iwl_rx_mem_buffer *rxb;
1459
1460 u32 rb_pending_alloc =
1461 atomic_read(&trans_pcie->rba.req_pending) *
1462 RX_CLAIM_REQ_ALLOC;
1463
1464 if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
1465 !emergency)) {
1466 iwl_pcie_rx_move_to_allocator(rxq, rba);
1467 emergency = true;
1468 IWL_DEBUG_TPT(trans,
1469 "RX path is in emergency. Pending allocations %d\n",
1470 rb_pending_alloc);
1471 }
1472
1473 IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
1474
1475 rxb = iwl_pcie_get_rxb(trans, rxq, i);
1476 if (!rxb)
1477 goto out;
1478
1479 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
1480
1481 i = (i + 1) & (rxq->queue_size - 1);
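/*
 * Pull a completed batch back from the allocator as soon as enough
 * buffers have been used.  Outside emergency mode, every
 * RX_CLAIM_REQ_ALLOC used buffers are handed to the allocator; in
 * emergency mode, every 8 handled buffers we restock directly and
 * re-check whether the emergency condition has cleared.
 */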
1490 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
1491 iwl_pcie_rx_allocator_get(trans, rxq);
1492
1493 if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
1494
1495 iwl_pcie_rx_move_to_allocator(rxq, rba);
1496 } else if (emergency) {
1497 count++;
1498 if (count == 8) {
1499 count = 0;
1500 if (rb_pending_alloc < rxq->queue_size / 3) {
1501 IWL_DEBUG_TPT(trans,
1502 "RX path exited emergency. Pending allocations %d\n",
1503 rb_pending_alloc);
1504 emergency = false;
1505 }
1506
1507 rxq->read = i;
1508 spin_unlock(&rxq->lock);
1509 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1510 iwl_pcie_rxq_restock(trans, rxq);
1511 goto restart;
1512 }
1513 }
1514 }
1515 out:
1516
1517 rxq->read = i;
1518
1519 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1520 *rxq->cr_tail = cpu_to_le16(r);
1521 spin_unlock(&rxq->lock);
1522
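/*
 * If still in emergency mode and buffers were handled since the last
 * direct restock, allocate replacement pages here (atomically) before
 * flushing NAPI and restocking the ring below.
 */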
1535 if (unlikely(emergency && count))
1536 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1537
1538 napi = &rxq->napi;
1539 if (napi->poll) {
1540 napi_gro_flush(napi, false);
1541
1542 if (napi->rx_count) {
1543 netif_receive_skb_list(&napi->rx_list);
1544 INIT_LIST_HEAD(&napi->rx_list);
1545 napi->rx_count = 0;
1546 }
1547 }
1548
1549 iwl_pcie_rxq_restock(trans, rxq);
1550 }
1551
1552 static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
1553 {
1554 u8 queue = entry->entry;
1555 struct msix_entry *entries = entry - queue;
1556
1557 return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
1558 }
1559
1560
1561
1562
1563
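/*
 * iwl_pcie_irq_rx_msix_handler - threaded handler for the per-RX-queue
 * MSI-X vectors; the vector index maps directly to the RX queue number.
 */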
1564 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
1565 {
1566 struct msix_entry *entry = dev_id;
1567 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1568 struct iwl_trans *trans = trans_pcie->trans;
1569
1570 trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
1571
1572 if (WARN_ON(entry->entry >= trans->num_rx_queues))
1573 return IRQ_NONE;
1574
1575 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1576
1577 local_bh_disable();
1578 iwl_pcie_rx_handle(trans, entry->entry);
1579 local_bh_enable();
1580
1581 iwl_pcie_clear_irq(trans, entry);
1582
1583 lock_map_release(&trans->sync_cmd_lockdep_map);
1584
1585 return IRQ_HANDLED;
1586 }
1587
1588
1589
1590
1591 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1592 {
1593 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1594 int i;
1595
1596
1597 if (trans->cfg->internal_wimax_coex &&
1598 !trans->cfg->apmg_not_supported &&
1599 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1600 APMS_CLK_VAL_MRB_FUNC_MODE) ||
1601 (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1602 APMG_PS_CTRL_VAL_RESET_REQ))) {
1603 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1604 iwl_op_mode_wimax_active(trans->op_mode);
1605 wake_up(&trans_pcie->wait_command_queue);
1606 return;
1607 }
1608
1609 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
1610 if (!trans_pcie->txq[i])
1611 continue;
1612 del_timer(&trans_pcie->txq[i]->stuck_timer);
1613 }
1614
1615
1616
1617 iwl_trans_fw_error(trans);
1618
1619 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1620 wake_up(&trans_pcie->wait_command_queue);
1621 }
1622
1623 static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1624 {
1625 u32 inta;
1626
1627 lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1628
1629 trace_iwlwifi_dev_irq(trans->dev);
1630
1631
1632 inta = iwl_read32(trans, CSR_INT);
1633
1634
1635 return inta;
1636 }
1637
1638
1639 #define ICT_SHIFT 12
1640 #define ICT_SIZE (1 << ICT_SHIFT)
1641 #define ICT_COUNT (ICT_SIZE / sizeof(u32))
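/*
 * With ICT, the device writes its interrupt causes into a DMA table
 * instead of (only) the CSR_INT register, which is expensive to read.
 * iwl_pcie_int_cause_ict() walks the table from the current index,
 * collecting and clearing entries until it hits a zero one, and then
 * re-expands the compressed value into the CSR_INT bit layout.
 */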
1651 static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1652 {
1653 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1654 u32 inta;
1655 u32 val = 0;
1656 u32 read;
1657
1658 trace_iwlwifi_dev_irq(trans->dev);
1659
1660
1661
1662
1663 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1664 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1665 if (!read)
1666 return 0;
1667
1668
1669
1670
1671
1672 do {
1673 val |= read;
1674 IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1675 trans_pcie->ict_index, read);
1676 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1677 trans_pcie->ict_index =
1678 ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1679
1680 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1681 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1682 read);
1683 } while (read);
1684
1685
1686 if (val == 0xffffffff)
1687 val = 0;
1688
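/*
 * Hardware workaround: with interrupt coalescing the RX bit (bit 15,
 * which becomes bit 31 after the expansion below) may be cleared even
 * though bits 18/19 stay set, so use those to restore it.
 */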
1696 if (val & 0xC0000)
1697 val |= 0x8000;
1698
1699 inta = (0xff & val) | ((0xff00 & val) << 16);
1700 return inta;
1701 }
1702
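/*
 * iwl_pcie_handle_rfkill_irq - latch the new RF-kill state, report it to
 * the op_mode if it changed, and abort any synchronous host command that
 * was in flight when the radio was killed.
 */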
1703 void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
1704 {
1705 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1706 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1707 bool hw_rfkill, prev, report;
1708
1709 mutex_lock(&trans_pcie->mutex);
1710 prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1711 hw_rfkill = iwl_is_rfkill_set(trans);
1712 if (hw_rfkill) {
1713 set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1714 set_bit(STATUS_RFKILL_HW, &trans->status);
1715 }
1716 if (trans_pcie->opmode_down)
1717 report = hw_rfkill;
1718 else
1719 report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1720
1721 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1722 hw_rfkill ? "disable radio" : "enable radio");
1723
1724 isr_stats->rfkill++;
1725
1726 if (prev != report)
1727 iwl_trans_pcie_rf_kill(trans, report);
1728 mutex_unlock(&trans_pcie->mutex);
1729
1730 if (hw_rfkill) {
1731 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1732 &trans->status))
1733 IWL_DEBUG_RF_KILL(trans,
1734 "Rfkill while SYNC HCMD in flight\n");
1735 wake_up(&trans_pcie->wait_command_queue);
1736 } else {
1737 clear_bit(STATUS_RFKILL_HW, &trans->status);
1738 if (trans_pcie->opmode_down)
1739 clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1740 }
1741 }
1742
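/*
 * iwl_pcie_irq_handler - threaded interrupt handler for MSI/INTx: read
 * the interrupt causes (via the ICT table when in use), ack them, and
 * dispatch to the error, RF-kill, wakeup, RX and TX handling below.
 */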
1743 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1744 {
1745 struct iwl_trans *trans = dev_id;
1746 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1747 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1748 u32 inta = 0;
1749 u32 handled = 0;
1750
1751 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1752
1753 spin_lock(&trans_pcie->irq_lock);
1754
1755
1756
1757
1758 if (likely(trans_pcie->use_ict))
1759 inta = iwl_pcie_int_cause_ict(trans);
1760 else
1761 inta = iwl_pcie_int_cause_non_ict(trans);
1762
1763 if (iwl_have_debug_level(IWL_DL_ISR)) {
1764 IWL_DEBUG_ISR(trans,
1765 "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1766 inta, trans_pcie->inta_mask,
1767 iwl_read32(trans, CSR_INT_MASK),
1768 iwl_read32(trans, CSR_FH_INT_STATUS));
1769 if (inta & (~trans_pcie->inta_mask))
1770 IWL_DEBUG_ISR(trans,
1771 "We got a masked interrupt (0x%08x)\n",
1772 inta & (~trans_pcie->inta_mask));
1773 }
1774
1775 inta &= trans_pcie->inta_mask;
1776
1777
1778
1779
1780
1781
1782 if (unlikely(!inta)) {
1783 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1784
1785
1786
1787
1788 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1789 _iwl_enable_interrupts(trans);
1790 spin_unlock(&trans_pcie->irq_lock);
1791 lock_map_release(&trans->sync_cmd_lockdep_map);
1792 return IRQ_NONE;
1793 }
1794
1795 if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1796
1797
1798
1799
1800 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1801 spin_unlock(&trans_pcie->irq_lock);
1802 goto out;
1803 }
1804
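/*
 * Ack/clear the pending causes: the bits we are about to handle plus
 * everything outside our mask (inta | ~inta_mask), so that masked
 * causes do not stay pending.
 */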
1816 iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1817
1818 if (iwl_have_debug_level(IWL_DL_ISR))
1819 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1820 inta, iwl_read32(trans, CSR_INT_MASK));
1821
1822 spin_unlock(&trans_pcie->irq_lock);
1823
1824
1825 if (inta & CSR_INT_BIT_HW_ERR) {
1826 IWL_ERR(trans, "Hardware error detected. Restarting.\n");
1827
1828
1829 iwl_disable_interrupts(trans);
1830
1831 isr_stats->hw++;
1832 iwl_pcie_irq_handle_error(trans);
1833
1834 handled |= CSR_INT_BIT_HW_ERR;
1835
1836 goto out;
1837 }
1838
1839
1840 if (inta & CSR_INT_BIT_SCD) {
1841 IWL_DEBUG_ISR(trans,
1842 "Scheduler finished to transmit the frame/frames.\n");
1843 isr_stats->sch++;
1844 }
1845
1846
1847 if (inta & CSR_INT_BIT_ALIVE) {
1848 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1849 isr_stats->alive++;
1850 if (trans->trans_cfg->gen2) {
1851
1852
1853
1854
1855 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
1856 }
1857
1858 handled |= CSR_INT_BIT_ALIVE;
1859 }
1860
1861
1862 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1863
1864
1865 if (inta & CSR_INT_BIT_RF_KILL) {
1866 iwl_pcie_handle_rfkill_irq(trans);
1867 handled |= CSR_INT_BIT_RF_KILL;
1868 }
1869
1870
1871 if (inta & CSR_INT_BIT_CT_KILL) {
1872 IWL_ERR(trans, "Microcode CT kill error detected.\n");
1873 isr_stats->ctkill++;
1874 handled |= CSR_INT_BIT_CT_KILL;
1875 }
1876
1877
1878 if (inta & CSR_INT_BIT_SW_ERR) {
1879 IWL_ERR(trans, "Microcode SW error detected. "
1880 " Restarting 0x%X.\n", inta);
1881 isr_stats->sw++;
1882 iwl_pcie_irq_handle_error(trans);
1883 handled |= CSR_INT_BIT_SW_ERR;
1884 }
1885
1886
1887 if (inta & CSR_INT_BIT_WAKEUP) {
1888 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1889 iwl_pcie_rxq_check_wrptr(trans);
1890 iwl_pcie_txq_check_wrptrs(trans);
1891
1892 isr_stats->wakeup++;
1893
1894 handled |= CSR_INT_BIT_WAKEUP;
1895 }
1896
1897
1898
1899
1900 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1901 CSR_INT_BIT_RX_PERIODIC)) {
1902 IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1903 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1904 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1905 iwl_write32(trans, CSR_FH_INT_STATUS,
1906 CSR_FH_INT_RX_MASK);
1907 }
1908 if (inta & CSR_INT_BIT_RX_PERIODIC) {
1909 handled |= CSR_INT_BIT_RX_PERIODIC;
1910 iwl_write32(trans,
1911 CSR_INT, CSR_INT_BIT_RX_PERIODIC);
1912 }
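/*
 * Disable the periodic RX interrupt while servicing, and re-enable it
 * below only if a real RX cause was seen; this way frames that race
 * with the ack still raise a (periodic) interrupt instead of being left
 * unserviced.
 */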
1925 iwl_write8(trans, CSR_INT_PERIODIC_REG,
1926 CSR_INT_PERIODIC_DIS);
1927
1935 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
1936 iwl_write8(trans, CSR_INT_PERIODIC_REG,
1937 CSR_INT_PERIODIC_ENA);
1938
1939 isr_stats->rx++;
1940
1941 local_bh_disable();
1942 iwl_pcie_rx_handle(trans, 0);
1943 local_bh_enable();
1944 }
1945
1946
1947 if (inta & CSR_INT_BIT_FH_TX) {
1948 iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
1949 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
1950 isr_stats->tx++;
1951 handled |= CSR_INT_BIT_FH_TX;
1952
1953 trans_pcie->ucode_write_complete = true;
1954 wake_up(&trans_pcie->ucode_write_waitq);
1955 }
1956
1957 if (inta & ~handled) {
1958 IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1959 isr_stats->unhandled++;
1960 }
1961
1962 if (inta & ~(trans_pcie->inta_mask)) {
1963 IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
1964 inta & ~trans_pcie->inta_mask);
1965 }
1966
1967 spin_lock(&trans_pcie->irq_lock);
1968
1969 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1970 _iwl_enable_interrupts(trans);
1971
1972 else if (handled & CSR_INT_BIT_FH_TX)
1973 iwl_enable_fw_load_int(trans);
1974
1975 else if (handled & CSR_INT_BIT_RF_KILL)
1976 iwl_enable_rfkill_int(trans);
1977
1978 else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
1979 iwl_enable_fw_load_int_ctx_info(trans);
1980 spin_unlock(&trans_pcie->irq_lock);
1981
1982 out:
1983 lock_map_release(&trans->sync_cmd_lockdep_map);
1984 return IRQ_HANDLED;
1985 }
1986
1987
1988
1989
1990
1991
1992
1993
1994 void iwl_pcie_free_ict(struct iwl_trans *trans)
1995 {
1996 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1997
1998 if (trans_pcie->ict_tbl) {
1999 dma_free_coherent(trans->dev, ICT_SIZE,
2000 trans_pcie->ict_tbl,
2001 trans_pcie->ict_tbl_dma);
2002 trans_pcie->ict_tbl = NULL;
2003 trans_pcie->ict_tbl_dma = 0;
2004 }
2005 }
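/*
 * iwl_pcie_alloc_ict - allocate the ICT table: one ICT_SIZE block that
 * must be naturally aligned, since only the upper address bits are
 * programmed into the hardware (checked by the WARN_ON below).
 */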
2012 int iwl_pcie_alloc_ict(struct iwl_trans *trans)
2013 {
2014 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2015
2016 trans_pcie->ict_tbl =
2017 dma_alloc_coherent(trans->dev, ICT_SIZE,
2018 &trans_pcie->ict_tbl_dma, GFP_KERNEL);
2019 if (!trans_pcie->ict_tbl)
2020 return -ENOMEM;
2021
2022
2023 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
2024 iwl_pcie_free_ict(trans);
2025 return -EINVAL;
2026 }
2027
2028 return 0;
2029 }
2030
2031
2032
2033
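/*
 * iwl_pcie_reset_ict - clear the ICT table, program its address into
 * CSR_DRAM_INT_TBL_REG and switch the driver to ICT mode; used when the
 * device is (re)started.
 */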
2034 void iwl_pcie_reset_ict(struct iwl_trans *trans)
2035 {
2036 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2037 u32 val;
2038
2039 if (!trans_pcie->ict_tbl)
2040 return;
2041
2042 spin_lock(&trans_pcie->irq_lock);
2043 _iwl_disable_interrupts(trans);
2044
2045 memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
2046
2047 val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
2048
2049 val |= CSR_DRAM_INT_TBL_ENABLE |
2050 CSR_DRAM_INIT_TBL_WRAP_CHECK |
2051 CSR_DRAM_INIT_TBL_WRITE_POINTER;
2052
2053 IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
2054
2055 iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
2056 trans_pcie->use_ict = true;
2057 trans_pcie->ict_index = 0;
2058 iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
2059 _iwl_enable_interrupts(trans);
2060 spin_unlock(&trans_pcie->irq_lock);
2061 }
2062
2063
2064 void iwl_pcie_disable_ict(struct iwl_trans *trans)
2065 {
2066 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2067
2068 spin_lock(&trans_pcie->irq_lock);
2069 trans_pcie->use_ict = false;
2070 spin_unlock(&trans_pcie->irq_lock);
2071 }
2072
2073 irqreturn_t iwl_pcie_isr(int irq, void *data)
2074 {
2075 struct iwl_trans *trans = data;
2076
2077 if (!trans)
2078 return IRQ_NONE;
2079
2080
2081
2082
2083
2084
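/*
 * Disable (but do not clear) interrupts here to avoid back-to-back
 * ISRs; the threaded handler re-enables them once it has serviced the
 * causes.
 */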
2085 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
2086
2087 return IRQ_WAKE_THREAD;
2088 }
2089
2090 irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
2091 {
2092 return IRQ_WAKE_THREAD;
2093 }
2094
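/*
 * iwl_pcie_irq_msix_handler - threaded handler for the shared/default
 * MSI-X vector: reads and acks the FH and HW cause registers, then
 * dispatches RX, firmware-load, error, wakeup, RF-kill and IML handling.
 */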
2095 irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
2096 {
2097 struct msix_entry *entry = dev_id;
2098 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
2099 struct iwl_trans *trans = trans_pcie->trans;
2100 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2101 u32 inta_fh, inta_hw;
2102
2103 lock_map_acquire(&trans->sync_cmd_lockdep_map);
2104
2105 spin_lock(&trans_pcie->irq_lock);
2106 inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
2107 inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
2108
2109
2110
2111 iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
2112 iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
2113 spin_unlock(&trans_pcie->irq_lock);
2114
2115 trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
2116
2117 if (unlikely(!(inta_fh | inta_hw))) {
2118 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
2119 lock_map_release(&trans->sync_cmd_lockdep_map);
2120 return IRQ_NONE;
2121 }
2122
2123 if (iwl_have_debug_level(IWL_DL_ISR)) {
2124 IWL_DEBUG_ISR(trans,
2125 "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
2126 inta_fh, trans_pcie->fh_mask,
2127 iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
2128 if (inta_fh & ~trans_pcie->fh_mask)
2129 IWL_DEBUG_ISR(trans,
2130 "We got a masked interrupt (0x%08x)\n",
2131 inta_fh & ~trans_pcie->fh_mask);
2132 }
2133
2134 inta_fh &= trans_pcie->fh_mask;
2135
2136 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
2137 inta_fh & MSIX_FH_INT_CAUSES_Q0) {
2138 local_bh_disable();
2139 iwl_pcie_rx_handle(trans, 0);
2140 local_bh_enable();
2141 }
2142
2143 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
2144 inta_fh & MSIX_FH_INT_CAUSES_Q1) {
2145 local_bh_disable();
2146 iwl_pcie_rx_handle(trans, 1);
2147 local_bh_enable();
2148 }
2149
2150
2151 if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
2152 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2153 isr_stats->tx++;
2154
2155
2156
2157
2158 trans_pcie->ucode_write_complete = true;
2159 wake_up(&trans_pcie->ucode_write_waitq);
2160 }
2161
2162
2163 if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
2164 (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
2165 (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
2166 IWL_ERR(trans,
2167 "Microcode SW error detected. Restarting 0x%X.\n",
2168 inta_fh);
2169 isr_stats->sw++;
2170 iwl_pcie_irq_handle_error(trans);
2171 }
2172
2173
2174 if (iwl_have_debug_level(IWL_DL_ISR)) {
2175 IWL_DEBUG_ISR(trans,
2176 "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
2177 inta_hw, trans_pcie->hw_mask,
2178 iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
2179 if (inta_hw & ~trans_pcie->hw_mask)
2180 IWL_DEBUG_ISR(trans,
2181 "We got a masked interrupt 0x%08x\n",
2182 inta_hw & ~trans_pcie->hw_mask);
2183 }
2184
2185 inta_hw &= trans_pcie->hw_mask;
2186
2187
2188 if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
2189 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
2190 isr_stats->alive++;
2191 if (trans->trans_cfg->gen2) {
2192
2193 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2194 }
2195 }
2196
2197 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22560 &&
2198 inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
2199
2200 int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
2201
2202 IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
2203 if (res == IWL_IMAGE_RESP_FAIL) {
2204 isr_stats->sw++;
2205 iwl_pcie_irq_handle_error(trans);
2206 }
2207 } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
2208 u32 sleep_notif =
2209 le32_to_cpu(trans_pcie->prph_info->sleep_notif);
2210 if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
2211 sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
2212 IWL_DEBUG_ISR(trans,
2213 "Sx interrupt: sleep notification = 0x%x\n",
2214 sleep_notif);
2215 trans_pcie->sx_complete = true;
2216 wake_up(&trans_pcie->sx_waitq);
2217 } else {
2218
2219 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
2220 iwl_pcie_rxq_check_wrptr(trans);
2221 iwl_pcie_txq_check_wrptrs(trans);
2222
2223 isr_stats->wakeup++;
2224 }
2225 }
2226
2227 if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) {
2228
2229 int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
2230
2231 IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
2232 if (res == IWL_IMAGE_RESP_FAIL) {
2233 isr_stats->sw++;
2234 iwl_pcie_irq_handle_error(trans);
2235 }
2236 }
2237
2238
2239 if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
2240 IWL_ERR(trans, "Microcode CT kill error detected.\n");
2241 isr_stats->ctkill++;
2242 }
2243
2244
2245 if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
2246 iwl_pcie_handle_rfkill_irq(trans);
2247
2248 if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
2249 IWL_ERR(trans,
2250 "Hardware error detected. Restarting.\n");
2251
2252 isr_stats->hw++;
2253 trans->dbg.hw_error = true;
2254 iwl_pcie_irq_handle_error(trans);
2255 }
2256
2257 iwl_pcie_clear_irq(trans, entry);
2258
2259 lock_map_release(&trans->sync_cmd_lockdep_map);
2260
2261 return IRQ_HANDLED;
2262 }