This source file includes the following definitions:
- iwl_queue_inc_wrap
- iwl_get_closed_rb_stts
- iwl_queue_dec_wrap
- iwl_pcie_get_first_tb_dma
- IWL_TRANS_GET_PCIE_TRANS
- iwl_pcie_clear_irq
- iwl_trans_pcie_get_trans
- iwl_pcie_tfd_tb_get_len
- _iwl_disable_interrupts
- iwl_pcie_get_num_sections
- iwl_pcie_ctxt_info_alloc_dma
- iwl_pcie_ctxt_info_free_fw_img
- iwl_disable_interrupts
- _iwl_enable_interrupts
- iwl_enable_interrupts
- iwl_enable_hw_int_msk_msix
- iwl_enable_fh_int_msk_msix
- iwl_enable_fw_load_int
- iwl_enable_fw_load_int_ctx_info
- iwl_pcie_get_cmd_index
- iwl_pcie_get_tfd
- queue_name
- iwl_enable_rfkill_int
- iwl_wake_queue
- iwl_stop_queue
- iwl_queue_used
- iwl_is_rfkill_set
- __iwl_trans_pcie_set_bits_mask
- __iwl_trans_pcie_clear_bit
- __iwl_trans_pcie_set_bit
- iwl_pcie_dbg_on
- iwl_trans_pcie_dbgfs_register
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"

/*
 * A few TBs of each TFD are reserved for the TX command/header (and possibly
 * data in the skb head); the remaining TBs can carry fragments.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)

/* RX queue and buffer-allocator constants */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* struct iwl_rx_mem_buffer - one receive buffer: a host page and its DMA mapping */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
};

/* struct isr_statistics - interrupt counters, one per interrupt cause */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/* struct iwl_rx_transfer_desc - transfer descriptor for the newer RX rings */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED BIT(0)

/* struct iwl_rx_completion_desc - completion descriptor for the newer RX rings */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;

/*
 * struct iwl_rxq - one RX queue: the descriptor ring, the status write-back
 * area and the free/used buffer lists
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/*
 * struct iwl_rb_allocator - background allocator that refills receive
 * buffers for the RX queues
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

/* struct iwl_dma_ptr - a coherent DMA allocation: CPU address, bus address, size */
struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/* iwl_queue_inc_wrap - increment a TFD queue index, wrapping back to the start */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/* iwl_get_closed_rb_stts - read the closed RB index the device wrote back */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}

/* iwl_queue_dec_wrap - decrement a TFD queue index, wrapping back to the end */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
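
/*
 * The wrap helpers above rely on max_tfd_queue_size being a power of two,
 * so masking with (size - 1) folds the index back into the ring.  A minimal
 * sketch, assuming a 256-entry ring ("idx" is just an illustrative local):
 *
 *	idx = iwl_queue_inc_wrap(trans, 255);	// 256 & 0xff == 0
 *	idx = iwl_queue_dec_wrap(trans, 0);	//  -1 & 0xff == 255
 */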

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

/*
 * The first TB of every TFD points at a small per-entry scratch buffer that
 * holds the beginning of the TX command; it is padded out to a cache line.
 */
#define IWL_FIRST_TB_SIZE 20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/*
 * struct iwl_txq - one TX (hardware) queue: the TFD ring, per-entry
 * bookkeeping, watchdog timer and flow-control state
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}
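
/*
 * The per-entry scratch buffers live in one contiguous DMA allocation, so the
 * device address of entry @idx is the base plus idx * sizeof(buffer).  A
 * minimal sketch, assuming the queue's buffers have already been allocated:
 *
 *	dma_addr_t tb0 = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
 *	// tb0 == txq->first_tb_dma + write_ptr * IWL_FIRST_TB_SIZE_ALIGN
 */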

/* page used when building TSO headers */
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* enum iwl_fw_mon_dbgfs_state - state of the firmware monitor data debugfs file */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/* enum iwl_shared_irq_flags - what a shared MSI-X vector serves */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX = BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
};

/* enum iwl_image_response_code - firmware image load response values */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF = 0,
	IWL_IMAGE_RESP_SUCCESS = 1,
	IWL_IMAGE_RESP_FAIL = 2,
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* struct cont_rec - continuous firmware monitor recording state (debugfs) */
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8 state;
	/* serializes access to the recording state */
	struct mutex mutex;
};
#endif

/*
 * struct iwl_trans_pcie - PCIe transport specific data, stored in
 * trans->trans_specific and retrieved via IWL_TRANS_GET_PCIE_TRANS()
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	bool sx_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t sx_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;
	u8 def_rx_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * The HW masks the vector while its interrupt is being served to
	 * avoid nesting; writing the vector's bit to the auto-mask status
	 * register unmasks it again once handling is done.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
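
/*
 * IWL_TRANS_GET_PCIE_TRANS() and iwl_trans_pcie_get_trans() are inverses:
 * the PCIe-specific state is embedded at trans->trans_specific.  A minimal
 * sketch of the round trip:
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *	WARN_ON(iwl_trans_pcie_get_trans(trans_pcie) != trans);
 */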

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
		      const struct pci_device_id *ent,
		      const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/* RX */
int _iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);
int iwl_pcie_rx_alloc(struct iwl_trans *trans);

/* ICT - interrupt handling routines */
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/* TX / HCMD */
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
			  int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
				  struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
				   struct iwl_txq *txq, u16 byte_cnt,
				   int num_tbs);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}
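
/*
 * In the legacy TFD layout, hi_n_len packs the high 4 bits of the 36-bit DMA
 * address into bits 0..3 and the TB length into bits 4..15, hence the >> 4
 * above.  A minimal sketch of the packing (illustrative only):
 *
 *	hi_n_len = cpu_to_le16((len << 4) | (u16)(addr >> 32));
 */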

/* Error handling */
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/* Helpers */
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flood of RX interrupts */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* mask all the causes we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

/* count the firmware sections from @start up to the next separator */
static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}
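
/*
 * Firmware images are laid out as runs of sections split by separator
 * offsets, so the helper above is typically used to size each run.  A
 * minimal sketch, assuming @fw is a valid image (variable names here are
 * illustrative only):
 *
 *	int first_run = iwl_pcie_get_num_sections(fw, 0);
 *	int second_run = iwl_pcie_get_num_sections(fw, first_run + 1);
 */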

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					       const struct fw_desc *sec,
					       struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh_mask/hw_mask track the currently unmasked causes;
		 * unlike MSI, an MSI-X cause is enabled when its bit in the
		 * mask register is cleared, hence the negation below.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * The ALIVE notification is delivered over RX, so the RX
		 * (FH_RX) cause has to stay enabled together with ALIVE;
		 * otherwise the notification could never be received.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled so that the ALIVE
		 * notification (which arrives over RX) can be received.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}
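
/*
 * n_window is sized as a power of two, so masking with (n_window - 1) maps a
 * ring index onto an entry slot.  A minimal sketch, assuming a 32-entry
 * command queue window:
 *
 *	u16 slot = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 *	// with n_window == 32, write_ptr == 40 gives slot == 8
 */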

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->trans_cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On devices from the 9000 family on, this bit isn't set by
		 * default, so set it to allow an rfkill interrupt to wake
		 * up the PCIe bus.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}
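
/*
 * iwl_queue_used() treats [read_ptr, write_ptr) as the occupied part of the
 * ring and handles the wrapped case.  A minimal sketch, assuming r == 30 and
 * w == 3 in a 32-slot window, so slots 30, 31, 0, 1, 2 are in use:
 *
 *	iwl_queue_used(q, 31)	-> true
 *	iwl_queue_used(q, 5)	-> false
 */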

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
				   struct iwl_txq *txq);
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
				     struct iwl_txq **intxq, int size,
				     unsigned int timeout);
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 __le16 flags, u8 sta_id, u8 tid,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_tx_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset);
#endif /* __iwl_trans_int_pcie_h__ */