This source file includes following definitions.
- iwl_rx_packet_len
- iwl_rx_packet_payload_len
- iwl_free_resp
- rxb_addr
- rxb_offset
- rxb_steal_page
- iwl_free_rxb
- iwl_trans_get_rb_size_order
- iwl_trans_configure
- iwl_trans_start_hw
- iwl_trans_op_mode_leave
- iwl_trans_fw_alive
- iwl_trans_start_fw
- iwl_trans_stop_device
- iwl_trans_d3_suspend
- iwl_trans_d3_resume
- iwl_trans_suspend
- iwl_trans_resume
- iwl_trans_dump_data
- iwl_trans_alloc_tx_cmd
- iwl_trans_free_tx_cmd
- iwl_trans_tx
- iwl_trans_reclaim
- iwl_trans_set_q_ptrs
- iwl_trans_txq_disable
- iwl_trans_txq_enable_cfg
- iwl_trans_get_rxq_dma_data
- iwl_trans_txq_free
- iwl_trans_txq_alloc
- iwl_trans_txq_set_shared_mode
- iwl_trans_txq_enable
- iwl_trans_ac_txq_enable
- iwl_trans_freeze_txq_timer
- iwl_trans_block_txq_ptrs
- iwl_trans_wait_tx_queues_empty
- iwl_trans_wait_txq_empty
- iwl_trans_write8
- iwl_trans_write32
- iwl_trans_read32
- iwl_trans_read_prph
- iwl_trans_write_prph
- iwl_trans_read_mem
- iwl_trans_read_mem32
- iwl_trans_write_mem
- iwl_trans_write_mem32
- iwl_trans_set_pmi
- iwl_trans_sw_reset
- iwl_trans_set_bits_mask
- __releases
- iwl_trans_fw_error
- iwl_trans_sync_nmi
- iwl_trans_dbg_ini_valid
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64 #ifndef __iwl_trans_h__
65 #define __iwl_trans_h__
66
67 #include <linux/ieee80211.h>
68 #include <linux/mm.h>
69 #include <linux/lockdep.h>
70 #include <linux/kernel.h>
71
72 #include "iwl-debug.h"
73 #include "iwl-config.h"
74 #include "fw/img.h"
75 #include "iwl-op-mode.h"
76 #include "fw/api/cmdhdr.h"
77 #include "fw/api/txq.h"
78 #include "fw/api/dbg-tlv.h"
79 #include "iwl-dbg-tlv.h"
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115 #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF
116 #define FH_RSCSR_FRAME_INVALID 0x55550000
117 #define FH_RSCSR_FRAME_ALIGN 0x40
118 #define FH_RSCSR_RPA_EN BIT(25)
119 #define FH_RSCSR_RADA_EN BIT(26)
120 #define FH_RSCSR_RXQ_POS 16
121 #define FH_RSCSR_RXQ_MASK 0x3F0000
122
/**
 * struct iwl_rx_packet - packet as received from the device
 * @len_n_flags: frame byte count in the low bits (FH_RSCSR_FRAME_SIZE_MSK)
 *	combined with status/queue flags; see the FH_RSCSR_* definitions
 *	above for the individual bit fields
 * @hdr: command/notification header identifying the payload
 * @data: payload bytes, @len_n_flags size minus the header
 *
 * NOTE(review): the original long comment documenting the len_n_flags bit
 * layout was stripped from this copy; consult the FH_RSCSR_* masks.
 */
struct iwl_rx_packet {
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;
145
146 static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
147 {
148 return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
149 }
150
151 static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
152 {
153 return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
154 }
155
156
157
158
159
160
161
162
163
164
/**
 * enum CMD_MODE - how to send the host commands?
 * @CMD_ASYNC: return right away and don't wait for the response
 * @CMD_WANT_SKB: the caller needs the response buffer and must release it
 *	with iwl_free_resp()
 * @CMD_SEND_IN_RFKILL: NOTE(review): presumably the command may be sent
 *	while in RF-kill — confirm against the transport implementation
 * @CMD_WANT_ASYNC_CALLBACK: NOTE(review): presumably the op_mode's async
 *	command callback should be invoked — confirm against callers
 */
enum CMD_MODE {
	CMD_ASYNC = BIT(0),
	CMD_WANT_SKB = BIT(1),
	CMD_SEND_IN_RFKILL = BIT(2),
	CMD_WANT_ASYNC_CALLBACK = BIT(3),
};
171
172 #define DEF_CMD_PAYLOAD_SIZE 320
173
174
175
176
177
178
179
180
/**
 * struct iwl_device_cmd - buffer for a host command as sent to the device
 *
 * The union overlays a normal and a wide command header over the same
 * buffer; the wide variant's payload is shrunk by the header size
 * difference so both union members occupy exactly the same total size.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;
195
196
197
198
199
200
201
202
/**
 * struct iwl_device_tx_cmd - buffer for a TX command
 * @hdr: the command header
 * @payload: variable-length TX command payload
 *
 * NOTE(review): allocated from trans->dev_cmd_pool (see
 * iwl_trans_alloc_tx_cmd()); the pool element size is set up elsewhere.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;
207
208 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
209
210
211
212
213
214 #define IWL_MAX_CMD_TBS_PER_TFD 2
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
/**
 * enum iwl_hcmd_dataflag - per-chunk flags for struct iwl_host_cmd::dataflags
 * @IWL_HCMD_DFL_NOCOPY: NOTE(review): presumably the chunk is mapped
 *	in place rather than copied into the command buffer — confirm
 * @IWL_HCMD_DFL_DUP: NOTE(review): presumably the chunk is duplicated
 *	into a separately allocated buffer — confirm
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY = BIT(0),
	IWL_HCMD_DFL_DUP = BIT(1),
};
235
/*
 * Bits for iwl_trans_debug::error_event_table_tlv_status, recording which
 * error event table addresses were provided via TLV.
 */
enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
};
241
242
243
244
245
246
247
248
249
250
251
252
253
254
/**
 * struct iwl_host_cmd - host command to the uCode
 * @data: array of chunks that make up the command; lengths in @len and
 *	per-chunk flags in @dataflags (same indices)
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set in @flags
 * @_rx_page_addr: internal — page backing @resp_pkt, released by
 *	iwl_free_resp()
 * @_rx_page_order: internal — order of the page at @_rx_page_addr
 * @flags: CMD_* flags from enum CMD_MODE
 * @id: command id
 * @len: byte length of each chunk in @data
 * @dataflags: IWL_HCMD_DFL_* flags for each chunk in @data
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};
266
267 static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
268 {
269 free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
270 }
271
/*
 * RX buffer handed to the op_mode; wraps the backing page plus the offset
 * of this frame within it. Fields prefixed with '_' are internal to the
 * transport and accessed via the rxb_*() helpers below.
 */
struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;	/* set by rxb_steal_page() */
	u32 _rx_page_order;
	unsigned int truesize;
};
279
280 static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
281 {
282 return (void *)((unsigned long)page_address(r->_page) + r->_offset);
283 }
284
/* Byte offset of this frame within the backing page. */
static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}
289
290 static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
291 {
292 r->_page_stolen = true;
293 get_page(r->_page);
294 return r->_page;
295 }
296
297 static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
298 {
299 __free_pages(r->_page, r->_rx_page_order);
300 }
301
302 #define MAX_NO_RECLAIM_CMDS 6
303
304 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
305
306
307
308
309
310 #define IWL_MAX_HW_QUEUES 32
311 #define IWL_MAX_TVQM_QUEUES 512
312
313 #define IWL_MAX_TID_COUNT 8
314 #define IWL_MGMT_TID 15
315 #define IWL_FRAME_LIMIT 64
316 #define IWL_MAX_RX_HW_QUEUES 16
317
318
319
320
321
322
/**
 * enum iwl_d3_status - device status after D3 (WoWLAN) resume
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
/**
 * enum iwl_trans_status - bit numbers for the trans->status bitmap
 * @STATUS_SYNC_HCMD_ACTIVE: a synchronous host command is in flight
 * @STATUS_DEVICE_ENABLED: the device is switched on
 * @STATUS_TPOWER_PMI: PMI (power management) state, see iwl_trans_set_pmi()
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: hardware RF-kill is asserted
 * @STATUS_RFKILL_OPMODE: RF-kill as reported to the op_mode
 * @STATUS_FW_ERROR: the firmware hit an error; cleared on start_fw and
 *	set by iwl_trans_fw_error()
 * @STATUS_TRANS_GOING_IDLE: transport is transitioning to idle
 * @STATUS_TRANS_IDLE: transport is idle
 * @STATUS_TRANS_DEAD: transport is unusable
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};
354
355 static inline int
356 iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
357 {
358 switch (rb_size) {
359 case IWL_AMSDU_2K:
360 return get_order(2 * 1024);
361 case IWL_AMSDU_4K:
362 return get_order(4 * 1024);
363 case IWL_AMSDU_8K:
364 return get_order(8 * 1024);
365 case IWL_AMSDU_12K:
366 return get_order(12 * 1024);
367 default:
368 WARN_ON(1);
369 return -1;
370 }
371 }
372
/**
 * struct iwl_hcmd_names - mapping of a command id to its printable name
 * @cmd_id: the command id
 * @cmd_name: the name, normally generated via HCMD_NAME()
 */
struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};
377
378 #define HCMD_NAME(x) \
379 { .cmd_id = x, .cmd_name = #x }
380
/**
 * struct iwl_hcmd_arr - an array of command names with its length
 * @arr: the array of iwl_hcmd_names entries
 * @size: number of entries, normally filled in via HCMD_ARR()
 */
struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};
385
386 #define HCMD_ARR(x) \
387 { .arr = x, .size = ARRAY_SIZE(x) }
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
/**
 * struct iwl_trans_config - transport configuration set by the op_mode
 * @op_mode: upper layer pointer; installed into trans->op_mode by
 *	iwl_trans_configure()
 * @cmd_queue: index of the command queue
 * @cmd_fifo: fifo used for host commands
 * @cmd_q_wdg_timeout: watchdog timeout for the command queue
 * @no_reclaim_cmds: response ids needing no TX reclaim; @n_no_reclaim_cmds
 *	entries (at most MAX_NO_RECLAIM_CMDS)
 * @n_no_reclaim_cmds: number of entries in @no_reclaim_cmds
 * @rx_buf_size: RX buffer size, see iwl_trans_get_rb_size_order()
 * @bc_table_dword: NOTE(review): presumably "byte-count table is written
 *	as dwords" — confirm against the transport implementation
 * @scd_set_active: NOTE(review): presumably "set the queue active in the
 *	scheduler on enable" — confirm
 * @sw_csum_tx: NOTE(review): looks like software TX checksumming — confirm
 * @command_groups: command-name arrays, used by iwl_get_cmd_string();
 *	must be sorted (checked by iwl_cmd_groups_verify_sorted())
 * @command_groups_size: number of entries in @command_groups
 * @cb_data_offs: NOTE(review): presumably an offset into skb->cb reserved
 *	for the transport — confirm against callers
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
};
432
/* Debug dump blob returned by iwl_trans_dump_data(): @len bytes in @data. */
struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};
437
438 struct iwl_trans;
439
/**
 * struct iwl_trans_txq_scd_cfg - scheduler configuration for a TX queue
 * @fifo: fifo the queue is mapped to
 * @sta_id: station id the queue belongs to
 * @tid: traffic identifier
 * @aggregate: whether this is an aggregation queue (see
 *	iwl_trans_txq_enable(): true when a real sta_id was given)
 * @frame_limit: scheduler window size
 */
struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};
447
448
449
450
451
452
453
454
/**
 * struct iwl_trans_rxq_dma_data - DMA addresses describing an RX queue,
 *	filled by the transport's rxq_dma_data op
 * @fr_bd_cb: NOTE(review): presumably the free buffer descriptor list
 *	DMA address — confirm against the firmware API
 * @fr_bd_wid: NOTE(review): presumably the initial free-BD write index
 * @urbd_stts_wrptr: NOTE(review): presumably the status write pointer
 *	DMA address — confirm
 * @ur_bd_cb: NOTE(review): presumably the used buffer descriptor list
 *	DMA address — confirm
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
/**
 * struct iwl_trans_ops - callbacks implemented by each transport
 *
 * Callbacks are mandatory unless the corresponding iwl_trans_*() wrapper
 * below checks them for NULL before calling (e.g. d3_suspend, suspend,
 * dump_data, txq_set_shared_mode, freeze_txq_timer, set_pmi, sw_reset,
 * sync_nmi). Wrappers that call might_sleep() (start_hw, start_fw,
 * stop_device, d3_*, txq_enable, txq_alloc, ...) may sleep; the others
 * must be callable from atomic context unless stated otherwise.
 * NOTE(review): the original per-member kernel-doc was stripped from this
 * copy; semantics are mirrored by the wrappers further down.
 */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_tx_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);

	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);

	int (*txq_alloc)(struct iwl_trans *trans,
			 __le16 flags, u8 sta_id, u8 tid,
			 int cmd_id, int size,
			 unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	void (*sw_reset)(struct iwl_trans *trans);
	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
	void (*release_nic_access)(struct iwl_trans *trans,
				   unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
	int (*suspend)(struct iwl_trans *trans);
	void (*resume)(struct iwl_trans *trans);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
	void (*sync_nmi)(struct iwl_trans *trans);
};
614
615
616
617
618
619
620
/**
 * enum iwl_trans_state - state of the transport layer
 * @IWL_TRANS_NO_FW: no firmware is running (set on stop_device /
 *	op_mode_leave)
 * @IWL_TRANS_FW_ALIVE: firmware is alive (set by iwl_trans_fw_alive())
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW = 0,
	IWL_TRANS_FW_ALIVE = 1,
};
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
/**
 * enum iwl_plat_pm_mode - platform power-management behavior on suspend
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled
 * @IWL_PLAT_PM_MODE_D3: device goes to D3 (WoWLAN) on suspend
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};
664
665
666
667
668
669
670
671
/**
 * enum iwl_ini_cfg_state - load state of an ini debug configuration
 * @IWL_INI_CFG_STATE_NOT_LOADED: no config was loaded
 * @IWL_INI_CFG_STATE_LOADED: config was loaded successfully
 * @IWL_INI_CFG_STATE_CORRUPTED: config was found but is unusable
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};
677
678
679 #define IWL_TRANS_NMI_TIMEOUT (HZ / 4)
680
681
682
683
684
685
686
/**
 * struct iwl_dram_data - a DMA-coherent memory block
 * @physical: DMA address of the block
 * @block: CPU (virtual) address of the block
 * @size: size of the block in bytes
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};
692
693
694
695
696
697
698
699
/**
 * struct iwl_self_init_dram - DRAM blocks for self-init firmware load
 * @fw: array of @fw_cnt firmware section blocks
 * @fw_cnt: number of entries in @fw
 * @paging: array of @paging_cnt paging blocks
 * @paging_cnt: number of entries in @paging
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
/**
 * struct iwl_trans_debug - debug configuration shared with the transport
 * @n_dest_reg: number of registers described by @dest_tlv
 * @rec_on: NOTE(review): presumably "debug recording is enabled" — confirm
 * @dest_tlv: debug destination TLV
 * @conf_tlv: debug configuration TLVs, one per FW_DBG_CONF_MAX slot
 * @trigger_tlv: debug trigger TLVs
 * @lmac_error_event_table: LMAC error table addresses (up to two LMACs)
 * @umac_error_event_table: UMAC error table address
 * @error_event_table_tlv_status: IWL_ERROR_EVENT_TABLE_* bits saying which
 *	of the above addresses came from TLVs
 * @internal_ini_cfg: load state of the internal ini debug config
 * @external_ini_cfg: load state of the external ini debug config
 * @num_blocks: number of valid entries in @fw_mon
 * @fw_mon: firmware monitor DMA blocks
 * @hw_error: NOTE(review): presumably records that a HW error occurred
 * @ini_dest: ini-configured debug destination
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	int num_blocks;
	struct iwl_dram_data fw_mon[IWL_FW_INI_ALLOCATION_NUM];

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;
};
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782 struct iwl_trans {
783 const struct iwl_trans_ops *ops;
784 struct iwl_op_mode *op_mode;
785 const struct iwl_cfg_trans_params *trans_cfg;
786 const struct iwl_cfg *cfg;
787 struct iwl_drv *drv;
788 enum iwl_trans_state state;
789 unsigned long status;
790
791 struct device *dev;
792 u32 max_skb_frags;
793 u32 hw_rev;
794 u32 hw_rf_id;
795 u32 hw_id;
796 char hw_id_str[52];
797
798 u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
799
800 bool pm_support;
801 bool ltr_enabled;
802
803 const struct iwl_hcmd_arr *command_groups;
804 int command_groups_size;
805 bool wide_cmd_header;
806
807 u8 num_rx_queues;
808
809 size_t iml_len;
810 u8 *iml;
811
812
813 struct kmem_cache *dev_cmd_pool;
814 char dev_cmd_pool_name[50];
815
816 struct dentry *dbgfs_dir;
817
818 #ifdef CONFIG_LOCKDEP
819 struct lockdep_map sync_cmd_lockdep_map;
820 #endif
821
822 struct iwl_trans_debug dbg;
823 struct iwl_self_init_dram init_dram;
824
825 enum iwl_plat_pm_mode system_pm_mode;
826
827
828
829 char trans_specific[0] __aligned(sizeof(void *));
830 };
831
832 const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
833 int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
834
/*
 * Install the op_mode and hand the rest of the configuration to the
 * transport. op_mode must be set before the configure op runs so the
 * transport can use it; the WARN checks that the op_mode's command-name
 * groups are sorted (required by iwl_get_cmd_string()).
 */
static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}
843
844 static inline int iwl_trans_start_hw(struct iwl_trans *trans)
845 {
846 might_sleep();
847
848 return trans->ops->start_hw(trans);
849 }
850
/*
 * Called when the op_mode detaches: notify the transport (optional op),
 * then drop the op_mode pointer and mark the firmware as not running.
 * The ordering matters — the transport may still use trans->op_mode
 * inside its op_mode_leave callback.
 */
static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}
862
/*
 * The firmware reported it is alive: record the state first (the fw_alive
 * op may issue commands that require IWL_TRANS_FW_ALIVE), then let the
 * transport finish its alive handling.
 */
static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}
871
/*
 * Load and start the given firmware image. The op_mode must have set
 * rx_mpdu_cmd beforehand (warned otherwise). Any stale FW error status
 * is cleared before the new image runs. May sleep.
 */
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	return trans->ops->start_fw(trans, fw, run_in_rfkill);
}
883
/*
 * Stop the device; the firmware is no longer running afterwards, so the
 * state is set to IWL_TRANS_NO_FW after the stop op completes. May sleep.
 */
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}
892
893 static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
894 bool reset)
895 {
896 might_sleep();
897 if (!trans->ops->d3_suspend)
898 return 0;
899
900 return trans->ops->d3_suspend(trans, test, reset);
901 }
902
903 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
904 enum iwl_d3_status *status,
905 bool test, bool reset)
906 {
907 might_sleep();
908 if (!trans->ops->d3_resume)
909 return 0;
910
911 return trans->ops->d3_resume(trans, status, test, reset);
912 }
913
914 static inline int iwl_trans_suspend(struct iwl_trans *trans)
915 {
916 if (!trans->ops->suspend)
917 return 0;
918
919 return trans->ops->suspend(trans);
920 }
921
922 static inline void iwl_trans_resume(struct iwl_trans *trans)
923 {
924 if (trans->ops->resume)
925 trans->ops->resume(trans);
926 }
927
928 static inline struct iwl_trans_dump_data *
929 iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
930 {
931 if (!trans->ops->dump_data)
932 return NULL;
933 return trans->ops->dump_data(trans, dump_mask);
934 }
935
936 static inline struct iwl_device_tx_cmd *
937 iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
938 {
939 return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
940 }
941
942 int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
943
944 static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
945 struct iwl_device_tx_cmd *dev_cmd)
946 {
947 kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
948 }
949
/*
 * Queue an skb for transmission. Fails fast with -EIO if the firmware
 * already hit an error or is not alive; otherwise forwards to the tx op.
 */
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}
963
/*
 * Reclaim transmitted frames up to (but not including) ssn from the given
 * queue into skbs. Only valid while the firmware is alive.
 */
static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs);
}
974
/* Set the queue pointers for the given queue; firmware must be alive. */
static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
					int ptr)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->set_q_ptrs(trans, queue, ptr);
}
985
986 static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
987 bool configure_scd)
988 {
989 trans->ops->txq_disable(trans, queue, configure_scd);
990 }
991
/*
 * Enable a TX queue with the given scheduler configuration, starting at
 * sequence number ssn. Returns the txq_enable op's result, or false if
 * the firmware is not alive. May sleep.
 */
static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}
1007
/*
 * Fill in the DMA addresses describing an RX queue; only supported on
 * transports that implement the rxq_dma_data op.
 */
static inline int
iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			   struct iwl_trans_rxq_dma_data *data)
{
	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
		return -ENOTSUPP;

	return trans->ops->rxq_dma_data(trans, queue, data);
}
1017
/* Free a dynamically allocated TX queue (gen2-style transports only). */
static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}
1026
/*
 * Dynamically allocate a TX queue (gen2-style transports only); the
 * firmware must be alive. Returns the queue id or a negative error.
 * May sleep.
 */
static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    __le16 flags, u8 sta_id, u8 tid,
		    int cmd_id, int size,
		    unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, flags, sta_id, tid,
				     cmd_id, size, wdg_timeout);
}
1046
1047 static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
1048 int queue, bool shared_mode)
1049 {
1050 if (trans->ops->txq_set_shared_mode)
1051 trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
1052 }
1053
1054 static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
1055 int fifo, int sta_id, int tid,
1056 int frame_limit, u16 ssn,
1057 unsigned int queue_wdg_timeout)
1058 {
1059 struct iwl_trans_txq_scd_cfg cfg = {
1060 .fifo = fifo,
1061 .sta_id = sta_id,
1062 .tid = tid,
1063 .frame_limit = frame_limit,
1064 .aggregate = sta_id >= 0,
1065 };
1066
1067 iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
1068 }
1069
1070 static inline
1071 void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
1072 unsigned int queue_wdg_timeout)
1073 {
1074 struct iwl_trans_txq_scd_cfg cfg = {
1075 .fifo = fifo,
1076 .sta_id = -1,
1077 .tid = IWL_MAX_TID_COUNT,
1078 .frame_limit = IWL_FRAME_LIMIT,
1079 .aggregate = false,
1080 };
1081
1082 iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
1083 }
1084
/*
 * Freeze/unfreeze the watchdog timers of the queues in the txqs bitmap;
 * optional op, requires the firmware to be alive.
 */
static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}
1097
/*
 * Block/unblock TX queue pointer updates; optional op, requires the
 * firmware to be alive.
 */
static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}
1109
/*
 * Wait until the TX queues in the txqs bitmap drain; requires both an
 * implementing transport and a live firmware.
 */
static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}
1123
/*
 * Wait until a single TX queue drains; requires both an implementing
 * transport and a live firmware.
 */
static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}
1136
1137 static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1138 {
1139 trans->ops->write8(trans, ofs, val);
1140 }
1141
1142 static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1143 {
1144 trans->ops->write32(trans, ofs, val);
1145 }
1146
1147 static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
1148 {
1149 return trans->ops->read32(trans, ofs);
1150 }
1151
1152 static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
1153 {
1154 return trans->ops->read_prph(trans, ofs);
1155 }
1156
1157 static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
1158 u32 val)
1159 {
1160 return trans->ops->write_prph(trans, ofs, val);
1161 }
1162
1163 static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
1164 void *buf, int dwords)
1165 {
1166 return trans->ops->read_mem(trans, addr, buf, dwords);
1167 }
1168
1169 #define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \
1170 do { \
1171 if (__builtin_constant_p(bufsize)) \
1172 BUILD_BUG_ON((bufsize) % sizeof(u32)); \
1173 iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
1174 } while (0)
1175
1176 static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
1177 {
1178 u32 value;
1179
1180 if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
1181 return 0xa5a5a5a5;
1182
1183 return value;
1184 }
1185
1186 static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
1187 const void *buf, int dwords)
1188 {
1189 return trans->ops->write_mem(trans, addr, buf, dwords);
1190 }
1191
1192 static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
1193 u32 val)
1194 {
1195 return iwl_trans_write_mem(trans, addr, &val, 1);
1196 }
1197
1198 static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
1199 {
1200 if (trans->ops->set_pmi)
1201 trans->ops->set_pmi(trans, state);
1202 }
1203
1204 static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
1205 {
1206 if (trans->ops->sw_reset)
1207 trans->ops->sw_reset(trans);
1208 }
1209
1210 static inline void
1211 iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
1212 {
1213 trans->ops->set_bits_mask(trans, reg, mask, value);
1214 }
1215
1216 #define iwl_trans_grab_nic_access(trans, flags) \
1217 __cond_lock(nic_access, \
1218 likely((trans)->ops->grab_nic_access(trans, flags)))
1219
/*
 * Counterpart of iwl_trans_grab_nic_access(); the __releases/__release
 * annotations keep sparse's lock-context tracking balanced with the
 * __cond_lock() in the grab macro above.
 */
static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}
1226
/*
 * Report a firmware error to the op_mode. test_and_set_bit() makes this
 * idempotent: only the first report triggers iwl_op_mode_nic_error(), so
 * a single erroneous firmware can't cause repeated restarts.
 */
static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
		iwl_op_mode_nic_error(trans->op_mode);
}
1236
1237 static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
1238 {
1239 if (trans->ops->sync_nmi)
1240 trans->ops->sync_nmi(trans);
1241 }
1242
1243 static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
1244 {
1245 return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
1246 trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
1247 }
1248
1249
1250
1251
1252 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
1253 struct device *dev,
1254 const struct iwl_trans_ops *ops,
1255 unsigned int cmd_pool_size,
1256 unsigned int cmd_pool_align);
1257 void iwl_trans_free(struct iwl_trans *trans);
1258
1259
1260
1261
1262 int __must_check iwl_pci_register_driver(void);
1263 void iwl_pci_unregister_driver(void);
1264
1265 #endif