root/drivers/net/wireless/ath/ath10k/pci.c

DEFINITIONS

This source file includes the following definitions:
  1. ath10k_pci_is_awake
  2. __ath10k_pci_wake
  3. __ath10k_pci_sleep
  4. ath10k_pci_wake_wait
  5. ath10k_pci_force_wake
  6. ath10k_pci_force_sleep
  7. ath10k_pci_wake
  8. ath10k_pci_sleep
  9. ath10k_pci_ps_timer
  10. ath10k_pci_sleep_sync
  11. ath10k_bus_pci_write32
  12. ath10k_bus_pci_read32
  13. ath10k_pci_write32
  14. ath10k_pci_read32
  15. ath10k_pci_soc_read32
  16. ath10k_pci_soc_write32
  17. ath10k_pci_reg_read32
  18. ath10k_pci_reg_write32
  19. ath10k_pci_irq_pending
  20. ath10k_pci_disable_and_clear_legacy_irq
  21. ath10k_pci_enable_legacy_irq
  22. ath10k_pci_get_irq_method
  23. __ath10k_pci_rx_post_buf
  24. ath10k_pci_rx_post_pipe
  25. ath10k_pci_rx_post
  26. ath10k_pci_rx_replenish_retry
  27. ath10k_pci_qca988x_targ_cpu_to_ce_addr
  28. ath10k_pci_qca6174_targ_cpu_to_ce_addr
  29. ath10k_pci_qca99x0_targ_cpu_to_ce_addr
  30. ath10k_pci_targ_cpu_to_ce_addr
  31. ath10k_pci_diag_read_mem
  32. ath10k_pci_diag_read32
  33. __ath10k_pci_diag_read_hi
  34. ath10k_pci_diag_write_mem
  35. ath10k_pci_diag_write32
  36. ath10k_pci_htc_tx_cb
  37. ath10k_pci_process_rx_cb
  38. ath10k_pci_process_htt_rx_cb
  39. ath10k_pci_htc_rx_cb
  40. ath10k_pci_htt_htc_rx_cb
  41. ath10k_pci_pktlog_rx_cb
  42. ath10k_pci_htt_tx_cb
  43. ath10k_pci_htt_rx_deliver
  44. ath10k_pci_htt_rx_cb
  45. ath10k_pci_hif_tx_sg
  46. ath10k_pci_hif_diag_read
  47. ath10k_pci_hif_get_free_queue_number
  48. ath10k_pci_dump_registers
  49. ath10k_pci_dump_memory_section
  50. ath10k_pci_set_ram_config
  51. ath10k_pci_dump_memory_sram
  52. ath10k_pci_dump_memory_reg
  53. ath10k_pci_dump_memory_generic
  54. ath10k_pci_dump_memory
  55. ath10k_pci_fw_dump_work
  56. ath10k_pci_fw_crashed_dump
  57. ath10k_pci_hif_send_complete_check
  58. ath10k_pci_rx_retry_sync
  59. ath10k_pci_hif_map_service_to_pipe
  60. ath10k_pci_hif_get_default_pipe
  61. ath10k_pci_irq_msi_fw_mask
  62. ath10k_pci_irq_msi_fw_unmask
  63. ath10k_pci_irq_disable
  64. ath10k_pci_irq_sync
  65. ath10k_pci_irq_enable
  66. ath10k_pci_hif_start
  67. ath10k_pci_rx_pipe_cleanup
  68. ath10k_pci_tx_pipe_cleanup
  69. ath10k_pci_buffer_cleanup
  70. ath10k_pci_ce_deinit
  71. ath10k_pci_flush
  72. ath10k_pci_hif_stop
  73. ath10k_pci_hif_exchange_bmi_msg
  74. ath10k_pci_bmi_send_done
  75. ath10k_pci_bmi_recv_data
  76. ath10k_pci_bmi_wait
  77. ath10k_pci_wake_target_cpu
  78. ath10k_pci_get_num_banks
  79. ath10k_bus_get_num_banks
  80. ath10k_pci_init_config
  81. ath10k_pci_override_ce_config
  82. ath10k_pci_alloc_pipes
  83. ath10k_pci_free_pipes
  84. ath10k_pci_init_pipes
  85. ath10k_pci_has_fw_crashed
  86. ath10k_pci_fw_crashed_clear
  87. ath10k_pci_has_device_gone
  88. ath10k_pci_warm_reset_si0
  89. ath10k_pci_warm_reset_cpu
  90. ath10k_pci_warm_reset_ce
  91. ath10k_pci_warm_reset_clear_lf
  92. ath10k_pci_warm_reset
  93. ath10k_pci_qca99x0_soft_chip_reset
  94. ath10k_pci_safe_chip_reset
  95. ath10k_pci_qca988x_chip_reset
  96. ath10k_pci_qca6174_chip_reset
  97. ath10k_pci_qca99x0_chip_reset
  98. ath10k_pci_chip_reset
  99. ath10k_pci_hif_power_up
  100. ath10k_pci_hif_power_down
  101. ath10k_pci_hif_suspend
  102. ath10k_pci_suspend
  103. ath10k_pci_hif_resume
  104. ath10k_pci_resume
  105. ath10k_pci_validate_cal
  106. ath10k_pci_enable_eeprom
  107. ath10k_pci_read_eeprom
  108. ath10k_pci_hif_fetch_cal_eeprom
  109. ath10k_pci_interrupt_handler
  110. ath10k_pci_napi_poll
  111. ath10k_pci_request_irq_msi
  112. ath10k_pci_request_irq_legacy
  113. ath10k_pci_request_irq
  114. ath10k_pci_free_irq
  115. ath10k_pci_init_napi
  116. ath10k_pci_init_irq
  117. ath10k_pci_deinit_irq_legacy
  118. ath10k_pci_deinit_irq
  119. ath10k_pci_wait_for_target_init
  120. ath10k_pci_cold_reset
  121. ath10k_pci_claim
  122. ath10k_pci_release
  123. ath10k_pci_chip_is_supported
  124. ath10k_pci_setup_resource
  125. ath10k_pci_release_resource
  126. ath10k_pci_probe
  127. ath10k_pci_remove
  128. ath10k_pci_pm_suspend
  129. ath10k_pci_pm_resume
  130. ath10k_pci_init
  131. ath10k_pci_exit

   1 // SPDX-License-Identifier: ISC
   2 /*
   3  * Copyright (c) 2005-2011 Atheros Communications Inc.
   4  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
   5  */
   6 
   7 #include <linux/pci.h>
   8 #include <linux/module.h>
   9 #include <linux/interrupt.h>
  10 #include <linux/spinlock.h>
  11 #include <linux/bitops.h>
  12 
  13 #include "core.h"
  14 #include "debug.h"
  15 #include "coredump.h"
  16 
  17 #include "targaddrs.h"
  18 #include "bmi.h"
  19 
  20 #include "hif.h"
  21 #include "htc.h"
  22 
  23 #include "ce.h"
  24 #include "pci.h"
  25 
  26 enum ath10k_pci_reset_mode {
  27         ATH10K_PCI_RESET_AUTO = 0,
  28         ATH10K_PCI_RESET_WARM_ONLY = 1,
  29 };
  30 
  31 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
  32 static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
  33 
  34 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
  35 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
  36 
  37 module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
  38 MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
  39 
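/* Example (illustrative note, not part of the file): both parameters can
 * be set at load time, e.g. "modprobe ath10k_pci irq_mode=1" to force
 * legacy interrupts. Since they are registered with mode 0644 they are
 * also exposed read-write under /sys/module/ath10k_pci/parameters/; a
 * runtime change takes effect the next time interrupts are set up.
 */
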
   40 /* how long to wait for the target to initialise, in ms */
  41 #define ATH10K_PCI_TARGET_WAIT 3000
  42 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
  43 
  44 /* Maximum number of bytes that can be handled atomically by
  45  * diag read and write.
  46  */
  47 #define ATH10K_DIAG_TRANSFER_LIMIT      0x5000
  48 
  49 #define QCA99X0_PCIE_BAR0_START_REG    0x81030
  50 #define QCA99X0_CPU_MEM_ADDR_REG       0x4d00c
  51 #define QCA99X0_CPU_MEM_DATA_REG       0x4d010
  52 
  53 static const struct pci_device_id ath10k_pci_id_table[] = {
  54         /* PCI-E QCA988X V2 (Ubiquiti branded) */
  55         { PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },
  56 
  57         { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
  58         { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
  59         { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
  60         { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
  61         { PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
  62         { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
  63         { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
  64         { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
  65         {0}
  66 };
  67 
  68 static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
  69         /* QCA988X pre 2.0 chips are not supported because they need some nasty
  70          * hacks. ath10k doesn't have them and these devices crash horribly
  71          * because of that.
  72          */
  73         { QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
  74         { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
  75 
  76         { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
  77         { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
  78         { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
  79         { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
  80         { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
  81 
  82         { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
  83         { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
  84         { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
  85         { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
  86         { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
  87 
  88         { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
  89 
  90         { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },
  91 
  92         { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },
  93 
  94         { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
  95         { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
  96 
  97         { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
  98 };
  99 
 100 static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
 101 static int ath10k_pci_cold_reset(struct ath10k *ar);
 102 static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
 103 static int ath10k_pci_init_irq(struct ath10k *ar);
 104 static int ath10k_pci_deinit_irq(struct ath10k *ar);
 105 static int ath10k_pci_request_irq(struct ath10k *ar);
 106 static void ath10k_pci_free_irq(struct ath10k *ar);
 107 static int ath10k_pci_bmi_wait(struct ath10k *ar,
 108                                struct ath10k_ce_pipe *tx_pipe,
 109                                struct ath10k_ce_pipe *rx_pipe,
 110                                struct bmi_xfer *xfer);
 111 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
 112 static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
 113 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
 114 static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
 115 static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
 116 static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
 117 static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
 118 
 119 static struct ce_attr host_ce_config_wlan[] = {
 120         /* CE0: host->target HTC control and raw streams */
 121         {
 122                 .flags = CE_ATTR_FLAGS,
 123                 .src_nentries = 16,
 124                 .src_sz_max = 256,
 125                 .dest_nentries = 0,
 126                 .send_cb = ath10k_pci_htc_tx_cb,
 127         },
 128 
 129         /* CE1: target->host HTT + HTC control */
 130         {
 131                 .flags = CE_ATTR_FLAGS,
 132                 .src_nentries = 0,
 133                 .src_sz_max = 2048,
 134                 .dest_nentries = 512,
 135                 .recv_cb = ath10k_pci_htt_htc_rx_cb,
 136         },
 137 
 138         /* CE2: target->host WMI */
 139         {
 140                 .flags = CE_ATTR_FLAGS,
 141                 .src_nentries = 0,
 142                 .src_sz_max = 2048,
 143                 .dest_nentries = 128,
 144                 .recv_cb = ath10k_pci_htc_rx_cb,
 145         },
 146 
 147         /* CE3: host->target WMI */
 148         {
 149                 .flags = CE_ATTR_FLAGS,
 150                 .src_nentries = 32,
 151                 .src_sz_max = 2048,
 152                 .dest_nentries = 0,
 153                 .send_cb = ath10k_pci_htc_tx_cb,
 154         },
 155 
 156         /* CE4: host->target HTT */
 157         {
 158                 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
 159                 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
 160                 .src_sz_max = 256,
 161                 .dest_nentries = 0,
 162                 .send_cb = ath10k_pci_htt_tx_cb,
 163         },
 164 
 165         /* CE5: target->host HTT (HIF->HTT) */
 166         {
 167                 .flags = CE_ATTR_FLAGS,
 168                 .src_nentries = 0,
 169                 .src_sz_max = 512,
 170                 .dest_nentries = 512,
 171                 .recv_cb = ath10k_pci_htt_rx_cb,
 172         },
 173 
 174         /* CE6: target autonomous hif_memcpy */
 175         {
 176                 .flags = CE_ATTR_FLAGS,
 177                 .src_nentries = 0,
 178                 .src_sz_max = 0,
 179                 .dest_nentries = 0,
 180         },
 181 
 182         /* CE7: ce_diag, the Diagnostic Window */
 183         {
 184                 .flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
 185                 .src_nentries = 2,
 186                 .src_sz_max = DIAG_TRANSFER_LIMIT,
 187                 .dest_nentries = 2,
 188         },
 189 
 190         /* CE8: target->host pktlog */
 191         {
 192                 .flags = CE_ATTR_FLAGS,
 193                 .src_nentries = 0,
 194                 .src_sz_max = 2048,
 195                 .dest_nentries = 128,
 196                 .recv_cb = ath10k_pci_pktlog_rx_cb,
 197         },
 198 
  199         /* CE9: target autonomous qcache memcpy */
 200         {
 201                 .flags = CE_ATTR_FLAGS,
 202                 .src_nentries = 0,
 203                 .src_sz_max = 0,
 204                 .dest_nentries = 0,
 205         },
 206 
 207         /* CE10: target autonomous hif memcpy */
 208         {
 209                 .flags = CE_ATTR_FLAGS,
 210                 .src_nentries = 0,
 211                 .src_sz_max = 0,
 212                 .dest_nentries = 0,
 213         },
 214 
 215         /* CE11: target autonomous hif memcpy */
 216         {
 217                 .flags = CE_ATTR_FLAGS,
 218                 .src_nentries = 0,
 219                 .src_sz_max = 0,
 220                 .dest_nentries = 0,
 221         },
 222 };
 223 
 224 /* Target firmware's Copy Engine configuration. */
 225 static struct ce_pipe_config target_ce_config_wlan[] = {
 226         /* CE0: host->target HTC control and raw streams */
 227         {
 228                 .pipenum = __cpu_to_le32(0),
 229                 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
 230                 .nentries = __cpu_to_le32(32),
 231                 .nbytes_max = __cpu_to_le32(256),
 232                 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
 233                 .reserved = __cpu_to_le32(0),
 234         },
 235 
 236         /* CE1: target->host HTT + HTC control */
 237         {
 238                 .pipenum = __cpu_to_le32(1),
 239                 .pipedir = __cpu_to_le32(PIPEDIR_IN),
 240                 .nentries = __cpu_to_le32(32),
 241                 .nbytes_max = __cpu_to_le32(2048),
 242                 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
 243                 .reserved = __cpu_to_le32(0),
 244         },
 245 
 246         /* CE2: target->host WMI */
 247         {
 248                 .pipenum = __cpu_to_le32(2),
 249                 .pipedir = __cpu_to_le32(PIPEDIR_IN),
 250                 .nentries = __cpu_to_le32(64),
 251                 .nbytes_max = __cpu_to_le32(2048),
 252                 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
 253                 .reserved = __cpu_to_le32(0),
 254         },
 255 
 256         /* CE3: host->target WMI */
 257         {
 258                 .pipenum = __cpu_to_le32(3),
 259                 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
 260                 .nentries = __cpu_to_le32(32),
 261                 .nbytes_max = __cpu_to_le32(2048),
 262                 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
 263                 .reserved = __cpu_to_le32(0),
 264         },
 265 
 266         /* CE4: host->target HTT */
 267         {
 268                 .pipenum = __cpu_to_le32(4),
 269                 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
 270                 .nentries = __cpu_to_le32(256),
 271                 .nbytes_max = __cpu_to_le32(256),
 272                 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
 273                 .reserved = __cpu_to_le32(0),
 274         },
 275 
 276         /* NB: 50% of src nentries, since tx has 2 frags */
 277 
 278         /* CE5: target->host HTT (HIF->HTT) */
 279         {
 280                 .pipenum = __cpu_to_le32(5),
 281                 .pipedir = __cpu_to_le32(PIPEDIR_IN),
 282                 .nentries = __cpu_to_le32(32),
 283                 .nbytes_max = __cpu_to_le32(512),
 284                 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
 285                 .reserved = __cpu_to_le32(0),
 286         },
 287 
 288         /* CE6: Reserved for target autonomous hif_memcpy */
 289         {
 290                 .pipenum = __cpu_to_le32(6),
 291                 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
 292                 .nentries = __cpu_to_le32(32),
 293                 .nbytes_max = __cpu_to_le32(4096),
 294                 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
 295                 .reserved = __cpu_to_le32(0),
 296         },
 297 
  298         /* CE7: used only by Host */
 299         {
 300                 .pipenum = __cpu_to_le32(7),
 301                 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
 302                 .nentries = __cpu_to_le32(0),
 303                 .nbytes_max = __cpu_to_le32(0),
 304                 .flags = __cpu_to_le32(0),
 305                 .reserved = __cpu_to_le32(0),
 306         },
 307 
  308         /* CE8: target->host pktlog */
 309         {
 310                 .pipenum = __cpu_to_le32(8),
 311                 .pipedir = __cpu_to_le32(PIPEDIR_IN),
 312                 .nentries = __cpu_to_le32(64),
 313                 .nbytes_max = __cpu_to_le32(2048),
 314                 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
 315                 .reserved = __cpu_to_le32(0),
 316         },
 317 
  318         /* CE9: target autonomous qcache memcpy */
 319         {
 320                 .pipenum = __cpu_to_le32(9),
 321                 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
 322                 .nentries = __cpu_to_le32(32),
 323                 .nbytes_max = __cpu_to_le32(2048),
 324                 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
 325                 .reserved = __cpu_to_le32(0),
 326         },
 327 
  328         /* It is not necessary to send the target wlan configuration for
  329          * CE10 & CE11 as these CEs are not actively used in the target.
 330          */
 331 };
 332 
 333 /*
 334  * Map from service/endpoint to Copy Engine.
 335  * This table is derived from the CE_PCI TABLE, above.
 336  * It is passed to the Target at startup for use by firmware.
 337  */
 338 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
 339         {
 340                 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
 341                 __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
 342                 __cpu_to_le32(3),
 343         },
 344         {
 345                 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
 346                 __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
 347                 __cpu_to_le32(2),
 348         },
 349         {
 350                 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
 351                 __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
 352                 __cpu_to_le32(3),
 353         },
 354         {
 355                 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
 356                 __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
 357                 __cpu_to_le32(2),
 358         },
 359         {
 360                 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
 361                 __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
 362                 __cpu_to_le32(3),
 363         },
 364         {
 365                 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
 366                 __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
 367                 __cpu_to_le32(2),
 368         },
 369         {
 370                 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
 371                 __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
 372                 __cpu_to_le32(3),
 373         },
 374         {
 375                 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
 376                 __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
 377                 __cpu_to_le32(2),
 378         },
 379         {
 380                 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
 381                 __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
 382                 __cpu_to_le32(3),
 383         },
 384         {
 385                 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
 386                 __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
 387                 __cpu_to_le32(2),
 388         },
 389         {
 390                 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
 391                 __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
 392                 __cpu_to_le32(0),
 393         },
 394         {
 395                 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
 396                 __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
 397                 __cpu_to_le32(1),
 398         },
 399         { /* not used */
 400                 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
 401                 __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
 402                 __cpu_to_le32(0),
 403         },
 404         { /* not used */
 405                 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
 406                 __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
 407                 __cpu_to_le32(1),
 408         },
 409         {
 410                 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
 411                 __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
 412                 __cpu_to_le32(4),
 413         },
 414         {
 415                 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
 416                 __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
 417                 __cpu_to_le32(5),
 418         },
 419 
 420         /* (Additions here) */
 421 
 422         { /* must be last */
 423                 __cpu_to_le32(0),
 424                 __cpu_to_le32(0),
 425                 __cpu_to_le32(0),
 426         },
 427 };
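
/* Minimal lookup sketch (illustrative; example_svc_to_pipe() is a
 * hypothetical helper, not part of the driver -- the real walk is
 * ath10k_pci_hif_map_service_to_pipe() later in this file). E.g. the
 * HTT data service resolves to CE4 for host->target and CE5 for
 * target->host per the entries above.
 */
#if 0
static int example_svc_to_pipe(u16 service_id, u32 pipedir, u8 *pipenum)
{
	const struct service_to_pipe *entry;
	int i;

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];
		if (__le32_to_cpu(entry->service_id) == service_id &&
		    __le32_to_cpu(entry->pipedir) == pipedir) {
			*pipenum = __le32_to_cpu(entry->pipenum);
			return 0;
		}
	}

	return -ENOENT;
}
#endif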
 428 
 429 static bool ath10k_pci_is_awake(struct ath10k *ar)
 430 {
 431         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 432         u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 433                            RTC_STATE_ADDRESS);
 434 
 435         return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
 436 }
 437 
 438 static void __ath10k_pci_wake(struct ath10k *ar)
 439 {
 440         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 441 
 442         lockdep_assert_held(&ar_pci->ps_lock);
 443 
 444         ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
 445                    ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 446 
 447         iowrite32(PCIE_SOC_WAKE_V_MASK,
 448                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 449                   PCIE_SOC_WAKE_ADDRESS);
 450 }
 451 
 452 static void __ath10k_pci_sleep(struct ath10k *ar)
 453 {
 454         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 455 
 456         lockdep_assert_held(&ar_pci->ps_lock);
 457 
 458         ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
 459                    ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 460 
 461         iowrite32(PCIE_SOC_WAKE_RESET,
 462                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 463                   PCIE_SOC_WAKE_ADDRESS);
 464         ar_pci->ps_awake = false;
 465 }
 466 
 467 static int ath10k_pci_wake_wait(struct ath10k *ar)
 468 {
 469         int tot_delay = 0;
 470         int curr_delay = 5;
 471 
 472         while (tot_delay < PCIE_WAKE_TIMEOUT) {
 473                 if (ath10k_pci_is_awake(ar)) {
 474                         if (tot_delay > PCIE_WAKE_LATE_US)
 475                                 ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
 476                                             tot_delay / 1000);
 477                         return 0;
 478                 }
 479 
 480                 udelay(curr_delay);
 481                 tot_delay += curr_delay;
 482 
 483                 if (curr_delay < 50)
 484                         curr_delay += 5;
 485         }
 486 
 487         return -ETIMEDOUT;
 488 }
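
/* Worked timing for the loop above (illustrative): the poll interval
 * ramps 5, 10, ..., 50 us (275 us cumulative) and then stays at 50 us
 * until PCIE_WAKE_TIMEOUT is exceeded, so a target that wakes quickly
 * is detected with low latency while a slow one is polled less often.
 */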
 489 
 490 static int ath10k_pci_force_wake(struct ath10k *ar)
 491 {
 492         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 493         unsigned long flags;
 494         int ret = 0;
 495 
 496         if (ar_pci->pci_ps)
 497                 return ret;
 498 
 499         spin_lock_irqsave(&ar_pci->ps_lock, flags);
 500 
 501         if (!ar_pci->ps_awake) {
 502                 iowrite32(PCIE_SOC_WAKE_V_MASK,
 503                           ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 504                           PCIE_SOC_WAKE_ADDRESS);
 505 
 506                 ret = ath10k_pci_wake_wait(ar);
 507                 if (ret == 0)
 508                         ar_pci->ps_awake = true;
 509         }
 510 
 511         spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 512 
 513         return ret;
 514 }
 515 
 516 static void ath10k_pci_force_sleep(struct ath10k *ar)
 517 {
 518         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 519         unsigned long flags;
 520 
 521         spin_lock_irqsave(&ar_pci->ps_lock, flags);
 522 
 523         iowrite32(PCIE_SOC_WAKE_RESET,
 524                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 525                   PCIE_SOC_WAKE_ADDRESS);
 526         ar_pci->ps_awake = false;
 527 
 528         spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 529 }
 530 
 531 static int ath10k_pci_wake(struct ath10k *ar)
 532 {
 533         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 534         unsigned long flags;
 535         int ret = 0;
 536 
 537         if (ar_pci->pci_ps == 0)
 538                 return ret;
 539 
 540         spin_lock_irqsave(&ar_pci->ps_lock, flags);
 541 
 542         ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
 543                    ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 544 
 545         /* This function can be called very frequently. To avoid excessive
  546          * CPU stalls on MMIO reads, use a cached variable for the device state.
 547          */
 548         if (!ar_pci->ps_awake) {
 549                 __ath10k_pci_wake(ar);
 550 
 551                 ret = ath10k_pci_wake_wait(ar);
 552                 if (ret == 0)
 553                         ar_pci->ps_awake = true;
 554         }
 555 
 556         if (ret == 0) {
 557                 ar_pci->ps_wake_refcount++;
 558                 WARN_ON(ar_pci->ps_wake_refcount == 0);
 559         }
 560 
 561         spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 562 
 563         return ret;
 564 }
 565 
 566 static void ath10k_pci_sleep(struct ath10k *ar)
 567 {
 568         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 569         unsigned long flags;
 570 
 571         if (ar_pci->pci_ps == 0)
 572                 return;
 573 
 574         spin_lock_irqsave(&ar_pci->ps_lock, flags);
 575 
 576         ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
 577                    ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 578 
 579         if (WARN_ON(ar_pci->ps_wake_refcount == 0))
 580                 goto skip;
 581 
 582         ar_pci->ps_wake_refcount--;
 583 
 584         mod_timer(&ar_pci->ps_timer, jiffies +
 585                   msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
 586 
 587 skip:
 588         spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 589 }
 590 
 591 static void ath10k_pci_ps_timer(struct timer_list *t)
 592 {
 593         struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
 594         struct ath10k *ar = ar_pci->ar;
 595         unsigned long flags;
 596 
 597         spin_lock_irqsave(&ar_pci->ps_lock, flags);
 598 
 599         ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
 600                    ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 601 
 602         if (ar_pci->ps_wake_refcount > 0)
 603                 goto skip;
 604 
 605         __ath10k_pci_sleep(ar);
 606 
 607 skip:
 608         spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 609 }
 610 
 611 static void ath10k_pci_sleep_sync(struct ath10k *ar)
 612 {
 613         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 614         unsigned long flags;
 615 
 616         if (ar_pci->pci_ps == 0) {
 617                 ath10k_pci_force_sleep(ar);
 618                 return;
 619         }
 620 
 621         del_timer_sync(&ar_pci->ps_timer);
 622 
 623         spin_lock_irqsave(&ar_pci->ps_lock, flags);
 624         WARN_ON(ar_pci->ps_wake_refcount > 0);
 625         __ath10k_pci_sleep(ar);
 626         spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 627 }
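
/* Usage sketch for the power-save API above (illustrative fragment; the
 * bus accessors below follow exactly this pattern): every successful
 * ath10k_pci_wake() must be balanced by ath10k_pci_sleep(), which arms
 * ps_timer so the chip only goes back to sleep after
 * ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC of inactivity.
 */
#if 0
	ret = ath10k_pci_wake(ar);		/* takes a wake reference */
	if (ret)
		return ret;

	iowrite32(value, ar_pci->mem + offset);	/* device guaranteed awake */

	ath10k_pci_sleep(ar);			/* drops the reference */
#endif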
 628 
 629 static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
 630 {
 631         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 632         int ret;
 633 
 634         if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
 635                 ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
 636                             offset, offset + sizeof(value), ar_pci->mem_len);
 637                 return;
 638         }
 639 
 640         ret = ath10k_pci_wake(ar);
 641         if (ret) {
 642                 ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
 643                             value, offset, ret);
 644                 return;
 645         }
 646 
 647         iowrite32(value, ar_pci->mem + offset);
 648         ath10k_pci_sleep(ar);
 649 }
 650 
 651 static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
 652 {
 653         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 654         u32 val;
 655         int ret;
 656 
 657         if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
 658                 ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
 659                             offset, offset + sizeof(val), ar_pci->mem_len);
 660                 return 0;
 661         }
 662 
 663         ret = ath10k_pci_wake(ar);
 664         if (ret) {
 665                 ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
 666                             offset, ret);
 667                 return 0xffffffff;
 668         }
 669 
 670         val = ioread32(ar_pci->mem + offset);
 671         ath10k_pci_sleep(ar);
 672 
 673         return val;
 674 }
 675 
 676 inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
 677 {
 678         struct ath10k_ce *ce = ath10k_ce_priv(ar);
 679 
 680         ce->bus_ops->write32(ar, offset, value);
 681 }
 682 
 683 inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
 684 {
 685         struct ath10k_ce *ce = ath10k_ce_priv(ar);
 686 
 687         return ce->bus_ops->read32(ar, offset);
 688 }
 689 
 690 u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
 691 {
 692         return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
 693 }
 694 
 695 void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
 696 {
 697         ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
 698 }
 699 
 700 u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
 701 {
 702         return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
 703 }
 704 
 705 void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
 706 {
 707         ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
 708 }
 709 
 710 bool ath10k_pci_irq_pending(struct ath10k *ar)
 711 {
 712         u32 cause;
 713 
 714         /* Check if the shared legacy irq is for us */
 715         cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 716                                   PCIE_INTR_CAUSE_ADDRESS);
 717         if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
 718                 return true;
 719 
 720         return false;
 721 }
 722 
 723 void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
 724 {
 725         /* IMPORTANT: INTR_CLR register has to be set after
 726          * INTR_ENABLE is set to 0, otherwise interrupt can not be
 727          * really cleared.
 728          */
 729         ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
 730                            0);
 731         ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
 732                            PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
 733 
 734         /* IMPORTANT: this extra read transaction is required to
 735          * flush the posted write buffer.
 736          */
 737         (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 738                                 PCIE_INTR_ENABLE_ADDRESS);
 739 }
 740 
 741 void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
 742 {
 743         ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
 744                            PCIE_INTR_ENABLE_ADDRESS,
 745                            PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
 746 
 747         /* IMPORTANT: this extra read transaction is required to
 748          * flush the posted write buffer.
 749          */
 750         (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 751                                 PCIE_INTR_ENABLE_ADDRESS);
 752 }
 753 
 754 static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
 755 {
 756         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 757 
 758         if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
 759                 return "msi";
 760 
 761         return "legacy";
 762 }
 763 
 764 static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
 765 {
 766         struct ath10k *ar = pipe->hif_ce_state;
 767         struct ath10k_ce *ce = ath10k_ce_priv(ar);
 768         struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
 769         struct sk_buff *skb;
 770         dma_addr_t paddr;
 771         int ret;
 772 
 773         skb = dev_alloc_skb(pipe->buf_sz);
 774         if (!skb)
 775                 return -ENOMEM;
 776 
 777         WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
 778 
 779         paddr = dma_map_single(ar->dev, skb->data,
 780                                skb->len + skb_tailroom(skb),
 781                                DMA_FROM_DEVICE);
 782         if (unlikely(dma_mapping_error(ar->dev, paddr))) {
 783                 ath10k_warn(ar, "failed to dma map pci rx buf\n");
 784                 dev_kfree_skb_any(skb);
 785                 return -EIO;
 786         }
 787 
 788         ATH10K_SKB_RXCB(skb)->paddr = paddr;
 789 
 790         spin_lock_bh(&ce->ce_lock);
 791         ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
 792         spin_unlock_bh(&ce->ce_lock);
 793         if (ret) {
 794                 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
 795                                  DMA_FROM_DEVICE);
 796                 dev_kfree_skb_any(skb);
 797                 return ret;
 798         }
 799 
 800         return 0;
 801 }
 802 
 803 static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
 804 {
 805         struct ath10k *ar = pipe->hif_ce_state;
 806         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 807         struct ath10k_ce *ce = ath10k_ce_priv(ar);
 808         struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
 809         int ret, num;
 810 
 811         if (pipe->buf_sz == 0)
 812                 return;
 813 
 814         if (!ce_pipe->dest_ring)
 815                 return;
 816 
 817         spin_lock_bh(&ce->ce_lock);
 818         num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
 819         spin_unlock_bh(&ce->ce_lock);
 820 
 821         while (num >= 0) {
 822                 ret = __ath10k_pci_rx_post_buf(pipe);
 823                 if (ret) {
 824                         if (ret == -ENOSPC)
 825                                 break;
 826                         ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
 827                         mod_timer(&ar_pci->rx_post_retry, jiffies +
 828                                   ATH10K_PCI_RX_POST_RETRY_MS);
 829                         break;
 830                 }
 831                 num--;
 832         }
 833 }
 834 
 835 void ath10k_pci_rx_post(struct ath10k *ar)
 836 {
 837         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 838         int i;
 839 
 840         for (i = 0; i < CE_COUNT; i++)
 841                 ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
 842 }
 843 
 844 void ath10k_pci_rx_replenish_retry(struct timer_list *t)
 845 {
 846         struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
 847         struct ath10k *ar = ar_pci->ar;
 848 
 849         ath10k_pci_rx_post(ar);
 850 }
 851 
 852 static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
 853 {
 854         u32 val = 0, region = addr & 0xfffff;
 855 
 856         val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
 857                                  & 0x7ff) << 21;
 858         val |= 0x100000 | region;
 859         return val;
 860 }
 861 
  862 /* Refactored from ath10k_pci_qca988x_targ_cpu_to_ce_addr.
  863  * Supports access to target space below 1M for qca6174 and qca9377.
  864  * If the target space is below 1M, bit[20] of the converted CE addr is 0.
  865  * Otherwise bit[20] of the converted CE addr is 1.
  866  */
 867 static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
 868 {
 869         u32 val = 0, region = addr & 0xfffff;
 870 
 871         val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
 872                                  & 0x7ff) << 21;
 873         val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
 874         return val;
 875 }
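
/* Worked example for the conversion above (illustrative values): with
 * CORE_CTRL[10:0] = 0x005, addr 0x00401234 (>= 1M) yields
 * (0x005 << 21) | 0x100000 | 0x01234 = 0x00b01234 (bit[20] set), while
 * addr 0x00081234 (< 1M) yields (0x005 << 21) | 0x81234 = 0x00a81234
 * (bit[20] clear).
 */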
 876 
 877 static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
 878 {
 879         u32 val = 0, region = addr & 0xfffff;
 880 
 881         val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
 882         val |= 0x100000 | region;
 883         return val;
 884 }
 885 
 886 static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
 887 {
 888         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 889 
 890         if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
 891                 return -ENOTSUPP;
 892 
 893         return ar_pci->targ_cpu_to_ce_addr(ar, addr);
 894 }
 895 
 896 /*
 897  * Diagnostic read/write access is provided for startup/config/debug usage.
 898  * Caller must guarantee proper alignment, when applicable, and single user
 899  * at any moment.
 900  */
 901 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
 902                                     int nbytes)
 903 {
 904         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 905         int ret = 0;
 906         u32 *buf;
 907         unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
 908         struct ath10k_ce_pipe *ce_diag;
 909         /* Host buffer address in CE space */
 910         u32 ce_data;
 911         dma_addr_t ce_data_base = 0;
 912         void *data_buf;
 913         int i;
 914 
 915         mutex_lock(&ar_pci->ce_diag_mutex);
 916         ce_diag = ar_pci->ce_diag;
 917 
 918         /*
 919          * Allocate a temporary bounce buffer to hold caller's data
 920          * to be DMA'ed from Target. This guarantees
 921          *   1) 4-byte alignment
 922          *   2) Buffer in DMA-able space
 923          */
 924         alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
 925 
 926         data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
 927                                       GFP_ATOMIC);
 928         if (!data_buf) {
 929                 ret = -ENOMEM;
 930                 goto done;
 931         }
 932 
 933         /* The address supplied by the caller is in the
 934          * Target CPU virtual address space.
 935          *
 936          * In order to use this address with the diagnostic CE,
 937          * convert it from Target CPU virtual address space
 938          * to CE address space
 939          */
 940         address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
 941 
 942         remaining_bytes = nbytes;
 943         ce_data = ce_data_base;
 944         while (remaining_bytes) {
 945                 nbytes = min_t(unsigned int, remaining_bytes,
 946                                DIAG_TRANSFER_LIMIT);
 947 
 948                 ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
 949                 if (ret != 0)
 950                         goto done;
 951 
 952                 /* Request CE to send from Target(!) address to Host buffer */
 953                 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0);
 954                 if (ret)
 955                         goto done;
 956 
 957                 i = 0;
 958                 while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
 959                         udelay(DIAG_ACCESS_CE_WAIT_US);
 960                         i += DIAG_ACCESS_CE_WAIT_US;
 961 
 962                         if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
 963                                 ret = -EBUSY;
 964                                 goto done;
 965                         }
 966                 }
 967 
 968                 i = 0;
 969                 while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
 970                                                      &completed_nbytes) != 0) {
 971                         udelay(DIAG_ACCESS_CE_WAIT_US);
 972                         i += DIAG_ACCESS_CE_WAIT_US;
 973 
 974                         if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
 975                                 ret = -EBUSY;
 976                                 goto done;
 977                         }
 978                 }
 979 
 980                 if (nbytes != completed_nbytes) {
 981                         ret = -EIO;
 982                         goto done;
 983                 }
 984 
 985                 if (*buf != ce_data) {
 986                         ret = -EIO;
 987                         goto done;
 988                 }
 989 
 990                 remaining_bytes -= nbytes;
 991                 memcpy(data, data_buf, nbytes);
 992 
 993                 address += nbytes;
 994                 data += nbytes;
 995         }
 996 
 997 done:
 998 
 999         if (data_buf)
1000                 dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
1001                                   ce_data_base);
1002 
1003         mutex_unlock(&ar_pci->ce_diag_mutex);
1004 
1005         return ret;
1006 }
1007 
1008 static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
1009 {
1010         __le32 val = 0;
1011         int ret;
1012 
1013         ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
1014         *value = __le32_to_cpu(val);
1015 
1016         return ret;
1017 }
1018 
1019 static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
1020                                      u32 src, u32 len)
1021 {
1022         u32 host_addr, addr;
1023         int ret;
1024 
1025         host_addr = host_interest_item_address(src);
1026 
1027         ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
1028         if (ret != 0) {
1029                 ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
1030                             src, ret);
1031                 return ret;
1032         }
1033 
1034         ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
1035         if (ret != 0) {
1036                 ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
1037                             addr, len, ret);
1038                 return ret;
1039         }
1040 
1041         return 0;
1042 }
1043 
1044 #define ath10k_pci_diag_read_hi(ar, dest, src, len)             \
1045         __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
1046 
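/* Usage sketch for the macro above: ath10k_pci_dump_registers() at the
 * end of this file pulls the firmware failure state with
 *
 *	ath10k_pci_diag_read_hi(ar, &reg_dump_values[0], hi_failure_state,
 *				REG_DUMP_COUNT_QCA988X * sizeof(__le32));
 *
 * i.e. the host-interest item is first resolved to a target address and
 * the bytes are then fetched through the diagnostic CE.
 */
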
1047 int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
1048                               const void *data, int nbytes)
1049 {
1050         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1051         int ret = 0;
1052         u32 *buf;
1053         unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
1054         struct ath10k_ce_pipe *ce_diag;
1055         void *data_buf;
1056         dma_addr_t ce_data_base = 0;
1057         int i;
1058 
1059         mutex_lock(&ar_pci->ce_diag_mutex);
1060         ce_diag = ar_pci->ce_diag;
1061 
1062         /*
1063          * Allocate a temporary bounce buffer to hold caller's data
1064          * to be DMA'ed to Target. This guarantees
1065          *   1) 4-byte alignment
1066          *   2) Buffer in DMA-able space
1067          */
1068         alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
1069 
1070         data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
1071                                       GFP_ATOMIC);
1072         if (!data_buf) {
1073                 ret = -ENOMEM;
1074                 goto done;
1075         }
1076 
1077         /*
1078          * The address supplied by the caller is in the
1079          * Target CPU virtual address space.
1080          *
1081          * In order to use this address with the diagnostic CE,
1082          * convert it from
1083          *    Target CPU virtual address space
1084          * to
1085          *    CE address space
1086          */
1087         address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
1088 
1089         remaining_bytes = nbytes;
1090         while (remaining_bytes) {
1091                 /* FIXME: check cast */
1092                 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
1093 
1094                 /* Copy caller's data to allocated DMA buf */
1095                 memcpy(data_buf, data, nbytes);
1096 
1097                 /* Set up to receive directly into Target(!) address */
1098                 ret = ath10k_ce_rx_post_buf(ce_diag, &address, address);
1099                 if (ret != 0)
1100                         goto done;
1101 
1102                 /*
1103                  * Request CE to send caller-supplied data that
1104                  * was copied to bounce buffer to Target(!) address.
1105                  */
1106                 ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0);
1107                 if (ret != 0)
1108                         goto done;
1109 
1110                 i = 0;
1111                 while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
1112                         udelay(DIAG_ACCESS_CE_WAIT_US);
1113                         i += DIAG_ACCESS_CE_WAIT_US;
1114 
1115                         if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1116                                 ret = -EBUSY;
1117                                 goto done;
1118                         }
1119                 }
1120 
1121                 i = 0;
1122                 while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
1123                                                      &completed_nbytes) != 0) {
1124                         udelay(DIAG_ACCESS_CE_WAIT_US);
1125                         i += DIAG_ACCESS_CE_WAIT_US;
1126 
1127                         if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1128                                 ret = -EBUSY;
1129                                 goto done;
1130                         }
1131                 }
1132 
1133                 if (nbytes != completed_nbytes) {
1134                         ret = -EIO;
1135                         goto done;
1136                 }
1137 
1138                 if (*buf != address) {
1139                         ret = -EIO;
1140                         goto done;
1141                 }
1142 
1143                 remaining_bytes -= nbytes;
1144                 address += nbytes;
1145                 data += nbytes;
1146         }
1147 
1148 done:
1149         if (data_buf) {
1150                 dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
1151                                   ce_data_base);
1152         }
1153 
1154         if (ret != 0)
1155                 ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
1156                             address, ret);
1157 
1158         mutex_unlock(&ar_pci->ce_diag_mutex);
1159 
1160         return ret;
1161 }
1162 
1163 static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
1164 {
1165         __le32 val = __cpu_to_le32(value);
1166 
1167         return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
1168 }
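
/* Round-trip sketch (illustrative fragment, not part of the driver): a
 * value written through the diagnostic window can be read back to
 * sanity-check target memory access:
 */
#if 0
	u32 readback;
	int ret;

	ret = ath10k_pci_diag_write32(ar, address, 0xdeadbeef);
	if (ret == 0)
		ret = ath10k_pci_diag_read32(ar, address, &readback);
	WARN_ON(ret == 0 && readback != 0xdeadbeef);
#endif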
1169 
1170 /* Called by lower (CE) layer when a send to Target completes. */
1171 static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
1172 {
1173         struct ath10k *ar = ce_state->ar;
1174         struct sk_buff_head list;
1175         struct sk_buff *skb;
1176 
1177         __skb_queue_head_init(&list);
1178         while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1179                 /* no need to call tx completion for NULL pointers */
1180                 if (skb == NULL)
1181                         continue;
1182 
1183                 __skb_queue_tail(&list, skb);
1184         }
1185 
1186         while ((skb = __skb_dequeue(&list)))
1187                 ath10k_htc_tx_completion_handler(ar, skb);
1188 }
1189 
1190 static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
1191                                      void (*callback)(struct ath10k *ar,
1192                                                       struct sk_buff *skb))
1193 {
1194         struct ath10k *ar = ce_state->ar;
1195         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1196         struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
1197         struct sk_buff *skb;
1198         struct sk_buff_head list;
1199         void *transfer_context;
1200         unsigned int nbytes, max_nbytes;
1201 
1202         __skb_queue_head_init(&list);
1203         while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
1204                                              &nbytes) == 0) {
1205                 skb = transfer_context;
1206                 max_nbytes = skb->len + skb_tailroom(skb);
1207                 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1208                                  max_nbytes, DMA_FROM_DEVICE);
1209 
1210                 if (unlikely(max_nbytes < nbytes)) {
1211                         ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1212                                     nbytes, max_nbytes);
1213                         dev_kfree_skb_any(skb);
1214                         continue;
1215                 }
1216 
1217                 skb_put(skb, nbytes);
1218                 __skb_queue_tail(&list, skb);
1219         }
1220 
1221         while ((skb = __skb_dequeue(&list))) {
1222                 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1223                            ce_state->id, skb->len);
1224                 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1225                                 skb->data, skb->len);
1226 
1227                 callback(ar, skb);
1228         }
1229 
1230         ath10k_pci_rx_post_pipe(pipe_info);
1231 }
1232 
1233 static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
1234                                          void (*callback)(struct ath10k *ar,
1235                                                           struct sk_buff *skb))
1236 {
1237         struct ath10k *ar = ce_state->ar;
1238         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1239         struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
1240         struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
1241         struct sk_buff *skb;
1242         struct sk_buff_head list;
1243         void *transfer_context;
1244         unsigned int nbytes, max_nbytes, nentries;
1245         int orig_len;
1246 
 1247         /* No need to acquire ce_lock for CE5, since this is the only place CE5
1248          * is processed other than init and deinit. Before releasing CE5
1249          * buffers, interrupts are disabled. Thus CE5 access is serialized.
1250          */
1251         __skb_queue_head_init(&list);
1252         while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
1253                                                     &nbytes) == 0) {
1254                 skb = transfer_context;
1255                 max_nbytes = skb->len + skb_tailroom(skb);
1256 
1257                 if (unlikely(max_nbytes < nbytes)) {
1258                         ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1259                                     nbytes, max_nbytes);
1260                         continue;
1261                 }
1262 
1263                 dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1264                                         max_nbytes, DMA_FROM_DEVICE);
1265                 skb_put(skb, nbytes);
1266                 __skb_queue_tail(&list, skb);
1267         }
1268 
1269         nentries = skb_queue_len(&list);
1270         while ((skb = __skb_dequeue(&list))) {
1271                 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1272                            ce_state->id, skb->len);
1273                 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1274                                 skb->data, skb->len);
1275 
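                /* Recycle the buffer in place: once the callback has
                 * consumed the payload, the skb is restored to its
                 * original empty state and synced back to the device,
                 * avoiding a dma_unmap/map cycle per packet.
                 */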
1276                 orig_len = skb->len;
1277                 callback(ar, skb);
1278                 skb_push(skb, orig_len - skb->len);
1279                 skb_reset_tail_pointer(skb);
1280                 skb_trim(skb, 0);
1281 
 1282                 /* let the device regain the buffer */
1283                 dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1284                                            skb->len + skb_tailroom(skb),
1285                                            DMA_FROM_DEVICE);
1286         }
1287         ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
1288 }
1289 
1290 /* Called by lower (CE) layer when data is received from the Target. */
1291 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1292 {
1293         ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1294 }
1295 
1296 static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1297 {
1298         /* CE4 polling needs to be done whenever CE pipe which transports
1299          * HTT Rx (target->host) is processed.
1300          */
1301         ath10k_ce_per_engine_service(ce_state->ar, 4);
1302 
1303         ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1304 }
1305 
1306 /* Called by lower (CE) layer when data is received from the Target.
1307  * Only 10.4 firmware uses separate CE to transfer pktlog data.
1308  */
1309 static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
1310 {
1311         ath10k_pci_process_rx_cb(ce_state,
1312                                  ath10k_htt_rx_pktlog_completion_handler);
1313 }
1314 
1315 /* Called by lower (CE) layer when a send to HTT Target completes. */
1316 static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
1317 {
1318         struct ath10k *ar = ce_state->ar;
1319         struct sk_buff *skb;
1320 
1321         while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1322                 /* no need to call tx completion for NULL pointers */
1323                 if (!skb)
1324                         continue;
1325 
1326                 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
1327                                  skb->len, DMA_TO_DEVICE);
1328                 ath10k_htt_hif_tx_complete(ar, skb);
1329         }
1330 }
1331 
1332 static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
1333 {
1334         skb_pull(skb, sizeof(struct ath10k_htc_hdr));
1335         ath10k_htt_t2h_msg_handler(ar, skb);
1336 }
1337 
1338 /* Called by lower (CE) layer when HTT data is received from the Target. */
1339 static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
1340 {
1341         /* CE4 polling needs to be done whenever CE pipe which transports
1342          * HTT Rx (target->host) is processed.
1343          */
1344         ath10k_ce_per_engine_service(ce_state->ar, 4);
1345 
1346         ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
1347 }
1348 
1349 int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1350                          struct ath10k_hif_sg_item *items, int n_items)
1351 {
1352         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1353         struct ath10k_ce *ce = ath10k_ce_priv(ar);
1354         struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
1355         struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
1356         struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
1357         unsigned int nentries_mask;
1358         unsigned int sw_index;
1359         unsigned int write_index;
1360         int err, i = 0;
1361 
1362         spin_lock_bh(&ce->ce_lock);
1363 
1364         nentries_mask = src_ring->nentries_mask;
1365         sw_index = src_ring->sw_index;
1366         write_index = src_ring->write_index;
1367 
1368         if (unlikely(CE_RING_DELTA(nentries_mask,
1369                                    write_index, sw_index - 1) < n_items)) {
1370                 err = -ENOBUFS;
1371                 goto err;
1372         }
1373 
1374         for (i = 0; i < n_items - 1; i++) {
1375                 ath10k_dbg(ar, ATH10K_DBG_PCI,
1376                            "pci tx item %d paddr %pad len %d n_items %d\n",
1377                            i, &items[i].paddr, items[i].len, n_items);
1378                 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1379                                 items[i].vaddr, items[i].len);
1380 
1381                 err = ath10k_ce_send_nolock(ce_pipe,
1382                                             items[i].transfer_context,
1383                                             items[i].paddr,
1384                                             items[i].len,
1385                                             items[i].transfer_id,
1386                                             CE_SEND_FLAG_GATHER);
1387                 if (err)
1388                         goto err;
1389         }
1390 
1391         /* i is equal to n_items - 1 after the for() loop */
1392 
1393         ath10k_dbg(ar, ATH10K_DBG_PCI,
1394                    "pci tx item %d paddr %pad len %d n_items %d\n",
1395                    i, &items[i].paddr, items[i].len, n_items);
1396         ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1397                         items[i].vaddr, items[i].len);
1398 
1399         err = ath10k_ce_send_nolock(ce_pipe,
1400                                     items[i].transfer_context,
1401                                     items[i].paddr,
1402                                     items[i].len,
1403                                     items[i].transfer_id,
1404                                     0);
1405         if (err)
1406                 goto err;
1407 
1408         spin_unlock_bh(&ce->ce_lock);
1409         return 0;
1410 
1411 err:
1412         for (; i > 0; i--)
1413                 __ath10k_ce_send_revert(ce_pipe);
1414 
1415         spin_unlock_bh(&ce->ce_lock);
1416         return err;
1417 }
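/* Editor's sketch (illustrative, not part of the driver): a hypothetical
 * caller sending a two-fragment gather list over pipe 4.  The values are
 * made up; only the field names come from struct ath10k_hif_sg_item:
 *
 *   struct ath10k_hif_sg_item items[2] = {
 *           { .transfer_id = 1, .transfer_context = skb,
 *             .vaddr = hdr, .paddr = hdr_paddr, .len = hdr_len },
 *           { .transfer_id = 1, .transfer_context = skb,
 *             .vaddr = body, .paddr = body_paddr, .len = body_len },
 *   };
 *   err = ath10k_pci_hif_tx_sg(ar, 4, items, 2);
 *
 * Every item except the last is queued with CE_SEND_FLAG_GATHER so the CE
 * hardware treats the fragments as one logical transfer.
 */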
1418 
1419 int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1420                              size_t buf_len)
1421 {
1422         return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
1423 }
1424 
1425 u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
1426 {
1427         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1428 
1429         ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
1430 
1431         return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
1432 }
1433 
1434 static void ath10k_pci_dump_registers(struct ath10k *ar,
1435                                       struct ath10k_fw_crash_data *crash_data)
1436 {
1437         __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
1438         int i, ret;
1439 
1440         lockdep_assert_held(&ar->dump_mutex);
1441 
1442         ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
1443                                       hi_failure_state,
1444                                       REG_DUMP_COUNT_QCA988X * sizeof(__le32));
1445         if (ret) {
1446                 ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
1447                 return;
1448         }
1449 
1450         BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
1451 
1452         ath10k_err(ar, "firmware register dump:\n");
1453         for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
1454                 ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
1455                            i,
1456                            __le32_to_cpu(reg_dump_values[i]),
1457                            __le32_to_cpu(reg_dump_values[i + 1]),
1458                            __le32_to_cpu(reg_dump_values[i + 2]),
1459                            __le32_to_cpu(reg_dump_values[i + 3]));
1460 
1461         if (!crash_data)
1462                 return;
1463 
1464         for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
1465                 crash_data->registers[i] = reg_dump_values[i];
1466 }
1467 
1468 static int ath10k_pci_dump_memory_section(struct ath10k *ar,
1469                                           const struct ath10k_mem_region *mem_region,
1470                                           u8 *buf, size_t buf_len)
1471 {
1472         const struct ath10k_mem_section *cur_section, *next_section;
1473         unsigned int count, section_size, skip_size;
1474         int ret, i, j;
1475 
1476         if (!mem_region || !buf)
1477                 return 0;
1478 
1479         cur_section = &mem_region->section_table.sections[0];
1480 
1481         if (mem_region->start > cur_section->start) {
1482                 ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
1483                             mem_region->start, cur_section->start);
1484                 return 0;
1485         }
1486 
1487         skip_size = cur_section->start - mem_region->start;
1488 
1489         /* fill the gap between the first register section and register
1490          * start address
1491          */
1492         for (i = 0; i < skip_size; i++) {
1493                 *buf = ATH10K_MAGIC_NOT_COPIED;
1494                 buf++;
1495         }
1496 
1497         count = 0;
1498 
1499         for (i = 0; cur_section != NULL; i++) {
1500                 section_size = cur_section->end - cur_section->start;
1501 
1502                 if (cur_section->end <= cur_section->start) {
1503                         ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
1504                                     cur_section->start,
1505                                     cur_section->end);
1506                         break;
1507                 }
1508 
1509                 if ((i + 1) == mem_region->section_table.size) {
1510                         /* last section */
1511                         next_section = NULL;
1512                         skip_size = 0;
1513                 } else {
1514                         next_section = cur_section + 1;
1515 
1516                         if (cur_section->end > next_section->start) {
1517                                 ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
1518                                             next_section->start,
1519                                             cur_section->end);
1520                                 break;
1521                         }
1522 
1523                         skip_size = next_section->start - cur_section->end;
1524                 }
1525 
1526                 if (buf_len < (skip_size + section_size)) {
1527                         ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
1528                         break;
1529                 }
1530 
1531                 buf_len -= skip_size + section_size;
1532 
1533                 /* read section to dest memory */
1534                 ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
1535                                                buf, section_size);
1536                 if (ret) {
1537                         ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
1538                                     cur_section->start, ret);
1539                         break;
1540                 }
1541 
1542                 buf += section_size;
1543                 count += section_size;
1544 
1545                 /* fill in the gap between this section and the next */
1546                 for (j = 0; j < skip_size; j++) {
1547                         *buf = ATH10K_MAGIC_NOT_COPIED;
1548                         buf++;
1549                 }
1550 
1551                 count += skip_size;
1552 
1553                 if (!next_section)
1554                         /* this was the last section */
1555                         break;
1556 
1557                 cur_section = next_section;
1558         }
1559 
1560         return count;
1561 }
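/* Editor's note: a worked example of the layout produced above, assuming a
 * hypothetical region starting at 0x1000 with sections [0x1100, 0x1200) and
 * [0x1300, 0x1400):
 *
 *   buf[0x000..0x0ff] = ATH10K_MAGIC_NOT_COPIED   gap before first section
 *   buf[0x100..0x1ff] = bytes read from 0x1100    section 0
 *   buf[0x200..0x2ff] = ATH10K_MAGIC_NOT_COPIED   gap between sections
 *   buf[0x300..0x3ff] = bytes read from 0x1300    section 1
 *
 * Region-relative offsets are preserved and uncopied bytes carry the magic
 * marker.  Note that the returned count covers the sections and the gaps
 * between them, but not the leading gap, which is written yet not counted.
 */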
1562 
1563 static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
1564 {
1565         u32 val;
1566 
1567         ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1568                            FW_RAM_CONFIG_ADDRESS, config);
1569 
1570         val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1571                                 FW_RAM_CONFIG_ADDRESS);
1572         if (val != config) {
1573                 ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
1574                             val, config);
1575                 return -EIO;
1576         }
1577 
1578         return 0;
1579 }
1580 
1581 /* if an error happened returns < 0, otherwise the length */
1582 static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
1583                                        const struct ath10k_mem_region *region,
1584                                        u8 *buf)
1585 {
1586         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1587         u32 base_addr, i;
1588 
1589         base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG);
1590         base_addr += region->start;
1591 
1592         for (i = 0; i < region->len; i += 4) {
1593                 iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
1594                 *(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG);
1595         }
1596 
1597         return region->len;
1598 }
1599 
1600 /* if an error happened returns < 0, otherwise the length */
1601 static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
1602                                       const struct ath10k_mem_region *region,
1603                                       u8 *buf)
1604 {
1605         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1606         u32 i;
1607         int ret;
1608 
1609         mutex_lock(&ar->conf_mutex);
1610         if (ar->state != ATH10K_STATE_ON) {
1611                 ath10k_warn(ar, "skipping pci_dump_memory_reg, invalid state\n");
1612                 ret = -EIO;
1613                 goto done;
1614         }
1615 
1616         for (i = 0; i < region->len; i += 4)
1617                 *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
1618 
1619         ret = region->len;
1620 done:
1621         mutex_unlock(&ar->conf_mutex);
1622         return ret;
1623 }
1624 
1625 /* if an error happened returns < 0, otherwise the length */
1626 static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
1627                                           const struct ath10k_mem_region *current_region,
1628                                           u8 *buf)
1629 {
1630         int ret;
1631 
1632         if (current_region->section_table.size > 0)
1633                 /* Copy each section individually. */
1634                 return ath10k_pci_dump_memory_section(ar,
1635                                                       current_region,
1636                                                       buf,
1637                                                       current_region->len);
1638 
1639         /* No individual memory sections defined, so we can
1640          * copy the entire memory region.
1641          */
1642         ret = ath10k_pci_diag_read_mem(ar,
1643                                        current_region->start,
1644                                        buf,
1645                                        current_region->len);
1646         if (ret) {
1647                 ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
1648                             current_region->name, ret);
1649                 return ret;
1650         }
1651 
1652         return current_region->len;
1653 }
1654 
1655 static void ath10k_pci_dump_memory(struct ath10k *ar,
1656                                    struct ath10k_fw_crash_data *crash_data)
1657 {
1658         const struct ath10k_hw_mem_layout *mem_layout;
1659         const struct ath10k_mem_region *current_region;
1660         struct ath10k_dump_ram_data_hdr *hdr;
1661         u32 count, shift;
1662         size_t buf_len;
1663         int ret, i;
1664         u8 *buf;
1665 
1666         lockdep_assert_held(&ar->dump_mutex);
1667 
1668         if (!crash_data)
1669                 return;
1670 
1671         mem_layout = ath10k_coredump_get_mem_layout(ar);
1672         if (!mem_layout)
1673                 return;
1674 
1675         current_region = &mem_layout->region_table.regions[0];
1676 
1677         buf = crash_data->ramdump_buf;
1678         buf_len = crash_data->ramdump_buf_len;
1679 
1680         memset(buf, 0, buf_len);
1681 
1682         for (i = 0; i < mem_layout->region_table.size; i++) {
1683                 count = 0;
1684 
1685                 if (current_region->len > buf_len) {
1686                         ath10k_warn(ar, "memory region %s size %d is larger than the remaining ramdump buffer size %zu\n",
1687                                     current_region->name,
1688                                     current_region->len,
1689                                     buf_len);
1690                         break;
1691                 }
1692 
1693                 /* To get IRAM dump, the host driver needs to switch target
1694                  * ram config from DRAM to IRAM.
1695                  */
1696                 if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
1697                     current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
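                        /* Editor's note: the shift below presumably selects
                         * a 1 MiB bank (start >> 20); this reading is an
                         * assumption from the arithmetic, not documented.
                         */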
1698                         shift = current_region->start >> 20;
1699 
1700                         ret = ath10k_pci_set_ram_config(ar, shift);
1701                         if (ret) {
1702                                 ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
1703                                             current_region->name, ret);
1704                                 break;
1705                         }
1706                 }
1707 
1708                 /* Reserve space for the header. */
1709                 hdr = (void *)buf;
1710                 buf += sizeof(*hdr);
1711                 buf_len -= sizeof(*hdr);
1712 
1713                 switch (current_region->type) {
1714                 case ATH10K_MEM_REGION_TYPE_IOSRAM:
1715                         count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
1716                         break;
1717                 case ATH10K_MEM_REGION_TYPE_IOREG:
1718                         ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
1719                         if (ret < 0)
1720                                 break;
1721 
1722                         count = ret;
1723                         break;
1724                 default:
1725                         ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
1726                         if (ret < 0)
1727                                 break;
1728 
1729                         count = ret;
1730                         break;
1731                 }
1732 
1733                 hdr->region_type = cpu_to_le32(current_region->type);
1734                 hdr->start = cpu_to_le32(current_region->start);
1735                 hdr->length = cpu_to_le32(count);
1736 
1737                 if (count == 0)
1738                         /* Note: the header remains, just with zero length. */
1739                         break;
1740 
1741                 buf += count;
1742                 buf_len -= count;
1743 
1744                 current_region++;
1745         }
1746 }
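/* Editor's sketch of the resulting ramdump buffer, assuming two regions
 * (purely illustrative):
 *
 *   +---------------------------------+
 *   | struct ath10k_dump_ram_data_hdr |   region 0 header
 *   | region 0 data (hdr->length B)   |
 *   +---------------------------------+
 *   | struct ath10k_dump_ram_data_hdr |   region 1 header
 *   | region 1 data (hdr->length B)   |
 *   +---------------------------------+
 *
 * A header with zero length terminates the walk early (see the count == 0
 * break above).
 */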
1747 
1748 static void ath10k_pci_fw_dump_work(struct work_struct *work)
1749 {
1750         struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
1751                                                  dump_work);
1752         struct ath10k_fw_crash_data *crash_data;
1753         struct ath10k *ar = ar_pci->ar;
1754         char guid[UUID_STRING_LEN + 1];
1755 
1756         mutex_lock(&ar->dump_mutex);
1757 
1758         spin_lock_bh(&ar->data_lock);
1759         ar->stats.fw_crash_counter++;
1760         spin_unlock_bh(&ar->data_lock);
1761 
1762         crash_data = ath10k_coredump_new(ar);
1763 
1764         if (crash_data)
1765                 scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
1766         else
1767                 scnprintf(guid, sizeof(guid), "n/a");
1768 
1769         ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
1770         ath10k_print_driver_info(ar);
1771         ath10k_pci_dump_registers(ar, crash_data);
1772         ath10k_ce_dump_registers(ar, crash_data);
1773         ath10k_pci_dump_memory(ar, crash_data);
1774 
1775         mutex_unlock(&ar->dump_mutex);
1776 
1777         queue_work(ar->workqueue, &ar->restart_work);
1778 }
1779 
1780 static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1781 {
1782         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1783 
1784         queue_work(ar->workqueue, &ar_pci->dump_work);
1785 }
1786 
1787 void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1788                                         int force)
1789 {
1790         ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
1791 
1792         if (!force) {
1793                 int resources;
1794                 /*
1795                  * Decide whether to actually poll for completions, or just
1796                  * wait for a later chance.
1797                  * If there seem to be plenty of resources left, then just wait
1798                  * since checking involves reading a CE register, which is a
1799                  * relatively expensive operation.
1800                  */
1801                 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
1802 
1803                 /*
1804                  * If at least 50% of the total resources are still available,
1805                  * don't bother checking again yet.
1806                  */
1807                 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
1808                         return;
1809         }
1810         ath10k_ce_per_engine_service(ar, pipe);
1811 }
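/* Editor's note: a worked example of the 50% heuristic above.  Assuming a
 * hypothetical pipe with src_nentries = 64:
 *
 *   threshold = 64 >> 1 = 32
 *
 * so with more than 32 free source entries the function returns without
 * touching the hardware, and the (relatively expensive) CE register read
 * only happens once 32 or fewer entries remain.
 */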
1812 
1813 static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
1814 {
1815         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1816 
1817         del_timer_sync(&ar_pci->rx_post_retry);
1818 }
1819 
1820 int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
1821                                        u8 *ul_pipe, u8 *dl_pipe)
1822 {
1823         const struct service_to_pipe *entry;
1824         bool ul_set = false, dl_set = false;
1825         int i;
1826 
1827         ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
1828 
1829         for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
1830                 entry = &target_service_to_ce_map_wlan[i];
1831 
1832                 if (__le32_to_cpu(entry->service_id) != service_id)
1833                         continue;
1834 
1835                 switch (__le32_to_cpu(entry->pipedir)) {
1836                 case PIPEDIR_NONE:
1837                         break;
1838                 case PIPEDIR_IN:
1839                         WARN_ON(dl_set);
1840                         *dl_pipe = __le32_to_cpu(entry->pipenum);
1841                         dl_set = true;
1842                         break;
1843                 case PIPEDIR_OUT:
1844                         WARN_ON(ul_set);
1845                         *ul_pipe = __le32_to_cpu(entry->pipenum);
1846                         ul_set = true;
1847                         break;
1848                 case PIPEDIR_INOUT:
1849                         WARN_ON(dl_set);
1850                         WARN_ON(ul_set);
1851                         *dl_pipe = __le32_to_cpu(entry->pipenum);
1852                         *ul_pipe = __le32_to_cpu(entry->pipenum);
1853                         dl_set = true;
1854                         ul_set = true;
1855                         break;
1856                 }
1857         }
1858 
1859         if (!ul_set || !dl_set)
1860                 return -ENOENT;
1861 
1862         return 0;
1863 }
1864 
1865 void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1866                                      u8 *ul_pipe, u8 *dl_pipe)
1867 {
1868         ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
1869 
1870         (void)ath10k_pci_hif_map_service_to_pipe(ar,
1871                                                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1872                                                  ul_pipe, dl_pipe);
1873 }
1874 
1875 void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1876 {
1877         u32 val;
1878 
1879         switch (ar->hw_rev) {
1880         case ATH10K_HW_QCA988X:
1881         case ATH10K_HW_QCA9887:
1882         case ATH10K_HW_QCA6174:
1883         case ATH10K_HW_QCA9377:
1884                 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1885                                         CORE_CTRL_ADDRESS);
1886                 val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1887                 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1888                                    CORE_CTRL_ADDRESS, val);
1889                 break;
1890         case ATH10K_HW_QCA99X0:
1891         case ATH10K_HW_QCA9984:
1892         case ATH10K_HW_QCA9888:
1893         case ATH10K_HW_QCA4019:
1894                 /* TODO: Find appropriate register configuration for QCA99X0
1895                  * to mask irq/MSI.
1896                  */
1897                 break;
1898         case ATH10K_HW_WCN3990:
1899                 break;
1900         }
1901 }
1902 
1903 static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1904 {
1905         u32 val;
1906 
1907         switch (ar->hw_rev) {
1908         case ATH10K_HW_QCA988X:
1909         case ATH10K_HW_QCA9887:
1910         case ATH10K_HW_QCA6174:
1911         case ATH10K_HW_QCA9377:
1912                 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1913                                         CORE_CTRL_ADDRESS);
1914                 val |= CORE_CTRL_PCIE_REG_31_MASK;
1915                 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1916                                    CORE_CTRL_ADDRESS, val);
1917                 break;
1918         case ATH10K_HW_QCA99X0:
1919         case ATH10K_HW_QCA9984:
1920         case ATH10K_HW_QCA9888:
1921         case ATH10K_HW_QCA4019:
1922                 /* TODO: Find appropriate register configuration for QCA99X0
1923                  * to unmask irq/MSI.
1924                  */
1925                 break;
1926         case ATH10K_HW_WCN3990:
1927                 break;
1928         }
1929 }
1930 
1931 static void ath10k_pci_irq_disable(struct ath10k *ar)
1932 {
1933         ath10k_ce_disable_interrupts(ar);
1934         ath10k_pci_disable_and_clear_legacy_irq(ar);
1935         ath10k_pci_irq_msi_fw_mask(ar);
1936 }
1937 
1938 static void ath10k_pci_irq_sync(struct ath10k *ar)
1939 {
1940         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1941 
1942         synchronize_irq(ar_pci->pdev->irq);
1943 }
1944 
1945 static void ath10k_pci_irq_enable(struct ath10k *ar)
1946 {
1947         ath10k_ce_enable_interrupts(ar);
1948         ath10k_pci_enable_legacy_irq(ar);
1949         ath10k_pci_irq_msi_fw_unmask(ar);
1950 }
1951 
1952 static int ath10k_pci_hif_start(struct ath10k *ar)
1953 {
1954         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1955 
1956         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
1957 
1958         napi_enable(&ar->napi);
1959 
1960         ath10k_pci_irq_enable(ar);
1961         ath10k_pci_rx_post(ar);
1962 
1963         pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
1964                                    ar_pci->link_ctl);
1965 
1966         return 0;
1967 }
1968 
1969 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1970 {
1971         struct ath10k *ar;
1972         struct ath10k_ce_pipe *ce_pipe;
1973         struct ath10k_ce_ring *ce_ring;
1974         struct sk_buff *skb;
1975         int i;
1976 
1977         ar = pci_pipe->hif_ce_state;
1978         ce_pipe = pci_pipe->ce_hdl;
1979         ce_ring = ce_pipe->dest_ring;
1980 
1981         if (!ce_ring)
1982                 return;
1983 
1984         if (!pci_pipe->buf_sz)
1985                 return;
1986 
1987         for (i = 0; i < ce_ring->nentries; i++) {
1988                 skb = ce_ring->per_transfer_context[i];
1989                 if (!skb)
1990                         continue;
1991 
1992                 ce_ring->per_transfer_context[i] = NULL;
1993 
1994                 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1995                                  skb->len + skb_tailroom(skb),
1996                                  DMA_FROM_DEVICE);
1997                 dev_kfree_skb_any(skb);
1998         }
1999 }
2000 
2001 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
2002 {
2003         struct ath10k *ar;
2004         struct ath10k_ce_pipe *ce_pipe;
2005         struct ath10k_ce_ring *ce_ring;
2006         struct sk_buff *skb;
2007         int i;
2008 
2009         ar = pci_pipe->hif_ce_state;
2010         ce_pipe = pci_pipe->ce_hdl;
2011         ce_ring = ce_pipe->src_ring;
2012 
2013         if (!ce_ring)
2014                 return;
2015 
2016         if (!pci_pipe->buf_sz)
2017                 return;
2018 
2019         for (i = 0; i < ce_ring->nentries; i++) {
2020                 skb = ce_ring->per_transfer_context[i];
2021                 if (!skb)
2022                         continue;
2023 
2024                 ce_ring->per_transfer_context[i] = NULL;
2025 
2026                 ath10k_htc_tx_completion_handler(ar, skb);
2027         }
2028 }
2029 
2030 /*
2031  * Cleanup residual buffers for device shutdown:
2032  *    buffers that were enqueued for receive
2033  *    buffers that were to be sent
2034  * Note: Buffers that had completed but which were
2035  * not yet processed are on a completion queue. They
2036  * are handled when the completion thread shuts down.
2037  */
2038 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
2039 {
2040         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2041         int pipe_num;
2042 
2043         for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
2044                 struct ath10k_pci_pipe *pipe_info;
2045 
2046                 pipe_info = &ar_pci->pipe_info[pipe_num];
2047                 ath10k_pci_rx_pipe_cleanup(pipe_info);
2048                 ath10k_pci_tx_pipe_cleanup(pipe_info);
2049         }
2050 }
2051 
2052 void ath10k_pci_ce_deinit(struct ath10k *ar)
2053 {
2054         int i;
2055 
2056         for (i = 0; i < CE_COUNT; i++)
2057                 ath10k_ce_deinit_pipe(ar, i);
2058 }
2059 
2060 void ath10k_pci_flush(struct ath10k *ar)
2061 {
2062         ath10k_pci_rx_retry_sync(ar);
2063         ath10k_pci_buffer_cleanup(ar);
2064 }
2065 
2066 static void ath10k_pci_hif_stop(struct ath10k *ar)
2067 {
2068         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2069         unsigned long flags;
2070 
2071         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
2072 
2073         ath10k_pci_irq_disable(ar);
2074         ath10k_pci_irq_sync(ar);
2075         napi_synchronize(&ar->napi);
2076         napi_disable(&ar->napi);
2077 
2078         /* Most likely the device has HTT Rx ring configured. The only way to
2079          * prevent the device from accessing (and possibly corrupting) host
2080          * memory is to reset the chip now.
2081          *
2082          * There's also no known way of masking MSI interrupts on the device.
2083          * For ranged MSI the CE-related interrupts can be masked. However,
2084          * regardless of how many MSI interrupts are assigned, the first one
2085          * is always used for firmware indications (crashes) and cannot be
2086          * masked. To prevent the device from asserting the interrupt, reset
2087          * it before proceeding with cleanup.
2088          */
2089         ath10k_pci_safe_chip_reset(ar);
2090 
2091         ath10k_pci_flush(ar);
2092 
2093         spin_lock_irqsave(&ar_pci->ps_lock, flags);
2094         WARN_ON(ar_pci->ps_wake_refcount > 0);
2095         spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
2096 }
2097 
2098 int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
2099                                     void *req, u32 req_len,
2100                                     void *resp, u32 *resp_len)
2101 {
2102         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2103         struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
2104         struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
2105         struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
2106         struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
2107         dma_addr_t req_paddr = 0;
2108         dma_addr_t resp_paddr = 0;
2109         struct bmi_xfer xfer = {};
2110         void *treq, *tresp = NULL;
2111         int ret = 0;
2112 
2113         might_sleep();
2114 
2115         if (resp && !resp_len)
2116                 return -EINVAL;
2117 
2118         if (resp && resp_len && *resp_len == 0)
2119                 return -EINVAL;
2120 
2121         treq = kmemdup(req, req_len, GFP_KERNEL);
2122         if (!treq)
2123                 return -ENOMEM;
2124 
2125         req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
2126         ret = dma_mapping_error(ar->dev, req_paddr);
2127         if (ret) {
2128                 ret = -EIO;
2129                 goto err_dma;
2130         }
2131 
2132         if (resp && resp_len) {
2133                 tresp = kzalloc(*resp_len, GFP_KERNEL);
2134                 if (!tresp) {
2135                         ret = -ENOMEM;
2136                         goto err_req;
2137                 }
2138 
2139                 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
2140                                             DMA_FROM_DEVICE);
2141                 ret = dma_mapping_error(ar->dev, resp_paddr);
2142                 if (ret) {
2143                         ret = -EIO;
2144                         goto err_req;
2145                 }
2146 
2147                 xfer.wait_for_resp = true;
2148                 xfer.resp_len = 0;
2149 
2150                 ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
2151         }
2152 
2153         ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
2154         if (ret)
2155                 goto err_resp;
2156 
2157         ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
2158         if (ret) {
2159                 dma_addr_t unused_buffer;
2160                 unsigned int unused_nbytes;
2161                 unsigned int unused_id;
2162 
2163                 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
2164                                            &unused_nbytes, &unused_id);
2165         } else {
2166                 /* the BMI exchange completed without timing out */
2167                 ret = 0;
2168         }
2169 
2170 err_resp:
2171         if (resp) {
2172                 dma_addr_t unused_buffer;
2173 
2174                 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
2175                 dma_unmap_single(ar->dev, resp_paddr,
2176                                  *resp_len, DMA_FROM_DEVICE);
2177         }
2178 err_req:
2179         dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
2180 
2181         if (ret == 0 && resp_len) {
2182                 *resp_len = min(*resp_len, xfer.resp_len);
2183                 memcpy(resp, tresp, xfer.resp_len);
2184         }
2185 err_dma:
2186         kfree(treq);
2187         kfree(tresp);
2188 
2189         return ret;
2190 }
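/* Editor's sketch (illustrative, not in the driver): a hypothetical BMI
 * exchange.  struct bmi_cmd/bmi_resp and BMI_READ_SOC_REGISTER are assumed
 * from the BMI layer; the field values are made up:
 *
 *   struct bmi_cmd cmd = { .id = __cpu_to_le32(BMI_READ_SOC_REGISTER) };
 *   struct bmi_resp resp;
 *   u32 resp_len = sizeof(resp);
 *   int ret;
 *
 *   ret = ath10k_pci_hif_exchange_bmi_msg(ar, &cmd, sizeof(cmd),
 *                                         &resp, &resp_len);
 *
 * On success resp_len is clamped to the number of bytes the target really
 * returned (see the min() above).
 */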
2191 
2192 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
2193 {
2194         struct bmi_xfer *xfer;
2195 
2196         if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
2197                 return;
2198 
2199         xfer->tx_done = true;
2200 }
2201 
2202 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
2203 {
2204         struct ath10k *ar = ce_state->ar;
2205         struct bmi_xfer *xfer;
2206         unsigned int nbytes;
2207 
2208         if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
2209                                           &nbytes))
2210                 return;
2211 
2212         if (WARN_ON_ONCE(!xfer))
2213                 return;
2214 
2215         if (!xfer->wait_for_resp) {
2216                 ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
2217                 return;
2218         }
2219 
2220         xfer->resp_len = nbytes;
2221         xfer->rx_done = true;
2222 }
2223 
2224 static int ath10k_pci_bmi_wait(struct ath10k *ar,
2225                                struct ath10k_ce_pipe *tx_pipe,
2226                                struct ath10k_ce_pipe *rx_pipe,
2227                                struct bmi_xfer *xfer)
2228 {
2229         unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
2230         unsigned long started = jiffies;
2231         unsigned long dur;
2232         int ret;
2233 
2234         while (time_before_eq(jiffies, timeout)) {
2235                 ath10k_pci_bmi_send_done(tx_pipe);
2236                 ath10k_pci_bmi_recv_data(rx_pipe);
2237 
2238                 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
2239                         ret = 0;
2240                         goto out;
2241                 }
2242 
2243                 schedule();
2244         }
2245 
2246         ret = -ETIMEDOUT;
2247 
2248 out:
2249         dur = jiffies - started;
2250         if (dur > HZ)
2251                 ath10k_dbg(ar, ATH10K_DBG_BMI,
2252                            "bmi cmd took %lu jiffies hz %d ret %d\n",
2253                            dur, HZ, ret);
2254         return ret;
2255 }
2256 
2257 /*
2258  * Send an interrupt to the device to wake up the Target CPU
2259  * so it has an opportunity to notice any changed state.
2260  */
2261 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
2262 {
2263         u32 addr, val;
2264 
2265         addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
2266         val = ath10k_pci_read32(ar, addr);
2267         val |= CORE_CTRL_CPU_INTR_MASK;
2268         ath10k_pci_write32(ar, addr, val);
2269 
2270         return 0;
2271 }
2272 
2273 static int ath10k_pci_get_num_banks(struct ath10k *ar)
2274 {
2275         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2276 
2277         switch (ar_pci->pdev->device) {
2278         case QCA988X_2_0_DEVICE_ID_UBNT:
2279         case QCA988X_2_0_DEVICE_ID:
2280         case QCA99X0_2_0_DEVICE_ID:
2281         case QCA9888_2_0_DEVICE_ID:
2282         case QCA9984_1_0_DEVICE_ID:
2283         case QCA9887_1_0_DEVICE_ID:
2284                 return 1;
2285         case QCA6164_2_1_DEVICE_ID:
2286         case QCA6174_2_1_DEVICE_ID:
2287                 switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) {
2288                 case QCA6174_HW_1_0_CHIP_ID_REV:
2289                 case QCA6174_HW_1_1_CHIP_ID_REV:
2290                 case QCA6174_HW_2_1_CHIP_ID_REV:
2291                 case QCA6174_HW_2_2_CHIP_ID_REV:
2292                         return 3;
2293                 case QCA6174_HW_1_3_CHIP_ID_REV:
2294                         return 2;
2295                 case QCA6174_HW_3_0_CHIP_ID_REV:
2296                 case QCA6174_HW_3_1_CHIP_ID_REV:
2297                 case QCA6174_HW_3_2_CHIP_ID_REV:
2298                         return 9;
2299                 }
2300                 break;
2301         case QCA9377_1_0_DEVICE_ID:
2302                 return 9;
2303         }
2304 
2305         ath10k_warn(ar, "unknown number of banks, assuming 1\n");
2306         return 1;
2307 }
2308 
2309 static int ath10k_bus_get_num_banks(struct ath10k *ar)
2310 {
2311         struct ath10k_ce *ce = ath10k_ce_priv(ar);
2312 
2313         return ce->bus_ops->get_num_banks(ar);
2314 }
2315 
2316 int ath10k_pci_init_config(struct ath10k *ar)
2317 {
2318         u32 interconnect_targ_addr;
2319         u32 pcie_state_targ_addr = 0;
2320         u32 pipe_cfg_targ_addr = 0;
2321         u32 svc_to_pipe_map = 0;
2322         u32 pcie_config_flags = 0;
2323         u32 ealloc_value;
2324         u32 ealloc_targ_addr;
2325         u32 flag2_value;
2326         u32 flag2_targ_addr;
2327         int ret = 0;
2328 
2329         /* Download to Target the CE Config and the service-to-CE map */
2330         interconnect_targ_addr =
2331                 host_interest_item_address(HI_ITEM(hi_interconnect_state));
2332 
2333         /* Supply Target-side CE configuration */
2334         ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
2335                                      &pcie_state_targ_addr);
2336         if (ret != 0) {
2337                 ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
2338                 return ret;
2339         }
2340 
2341         if (pcie_state_targ_addr == 0) {
2342                 ret = -EIO;
2343                 ath10k_err(ar, "Invalid pcie state addr\n");
2344                 return ret;
2345         }
2346 
2347         ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2348                                           offsetof(struct pcie_state,
2349                                                    pipe_cfg_addr)),
2350                                      &pipe_cfg_targ_addr);
2351         if (ret != 0) {
2352                 ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
2353                 return ret;
2354         }
2355 
2356         if (pipe_cfg_targ_addr == 0) {
2357                 ret = -EIO;
2358                 ath10k_err(ar, "Invalid pipe cfg addr\n");
2359                 return ret;
2360         }
2361 
2362         ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
2363                                         target_ce_config_wlan,
2364                                         sizeof(struct ce_pipe_config) *
2365                                         NUM_TARGET_CE_CONFIG_WLAN);
2366 
2367         if (ret != 0) {
2368                 ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
2369                 return ret;
2370         }
2371 
2372         ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2373                                           offsetof(struct pcie_state,
2374                                                    svc_to_pipe_map)),
2375                                      &svc_to_pipe_map);
2376         if (ret != 0) {
2377                 ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
2378                 return ret;
2379         }
2380 
2381         if (svc_to_pipe_map == 0) {
2382                 ret = -EIO;
2383                 ath10k_err(ar, "Invalid svc_to_pipe map\n");
2384                 return ret;
2385         }
2386 
2387         ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
2388                                         target_service_to_ce_map_wlan,
2389                                         sizeof(target_service_to_ce_map_wlan));
2390         if (ret != 0) {
2391                 ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
2392                 return ret;
2393         }
2394 
2395         ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2396                                           offsetof(struct pcie_state,
2397                                                    config_flags)),
2398                                      &pcie_config_flags);
2399         if (ret != 0) {
2400                 ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
2401                 return ret;
2402         }
2403 
2404         pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
2405 
2406         ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
2407                                            offsetof(struct pcie_state,
2408                                                     config_flags)),
2409                                       pcie_config_flags);
2410         if (ret != 0) {
2411                 ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
2412                 return ret;
2413         }
2414 
2415         /* configure early allocation */
2416         ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
2417 
2418         ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
2419         if (ret != 0) {
2420                 ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
2421                 return ret;
2422         }
2423 
2424         /* first bank is switched to IRAM */
2425         ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
2426                          HI_EARLY_ALLOC_MAGIC_MASK);
2427         ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
2428                           HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
2429                          HI_EARLY_ALLOC_IRAM_BANKS_MASK);
2430 
2431         ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
2432         if (ret != 0) {
2433                 ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
2434                 return ret;
2435         }
2436 
2437         /* Tell Target to proceed with initialization */
2438         flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
2439 
2440         ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
2441         if (ret != 0) {
2442                 ath10k_err(ar, "Failed to get option val: %d\n", ret);
2443                 return ret;
2444         }
2445 
2446         flag2_value |= HI_OPTION_EARLY_CFG_DONE;
2447 
2448         ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
2449         if (ret != 0) {
2450                 ath10k_err(ar, "Failed to set option val: %d\n", ret);
2451                 return ret;
2452         }
2453 
2454         return 0;
2455 }
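/* Editor's note: a worked example of the early-alloc packing above, using
 * made-up shift/mask values (the real ones live in the host-interest
 * headers).  With MAGIC_SHIFT = 0 and IRAM_BANKS_SHIFT = 4, two banks give:
 *
 *   ealloc_value |= (HI_EARLY_ALLOC_MAGIC << 0) & MAGIC_MASK;  // mark valid
 *   ealloc_value |= (2 << 4) & BANKS_MASK;                     // bank count
 *
 * i.e. the magic tag and the IRAM bank count occupy separate bit fields of
 * a single host-interest word.
 */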
2456 
2457 static void ath10k_pci_override_ce_config(struct ath10k *ar)
2458 {
2459         struct ce_attr *attr;
2460         struct ce_pipe_config *config;
2461 
2462         /* For QCA6174 we're overriding the Copy Engine 5 configuration,
2463          * since it is currently used for another feature.
2464          */
2465 
2466         /* Override Host's Copy Engine 5 configuration */
2467         attr = &host_ce_config_wlan[5];
2468         attr->src_sz_max = 0;
2469         attr->dest_nentries = 0;
2470 
2471         /* Override Target firmware's Copy Engine configuration */
2472         config = &target_ce_config_wlan[5];
2473         config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2474         config->nbytes_max = __cpu_to_le32(2048);
2475 
2476         /* Map from service/endpoint to Copy Engine */
2477         target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
2478 }
2479 
2480 int ath10k_pci_alloc_pipes(struct ath10k *ar)
2481 {
2482         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2483         struct ath10k_pci_pipe *pipe;
2484         struct ath10k_ce *ce = ath10k_ce_priv(ar);
2485         int i, ret;
2486 
2487         for (i = 0; i < CE_COUNT; i++) {
2488                 pipe = &ar_pci->pipe_info[i];
2489                 pipe->ce_hdl = &ce->ce_states[i];
2490                 pipe->pipe_num = i;
2491                 pipe->hif_ce_state = ar;
2492 
2493                 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
2494                 if (ret) {
2495                         ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
2496                                    i, ret);
2497                         return ret;
2498                 }
2499 
2500                 /* Last CE is Diagnostic Window */
2501                 if (i == CE_DIAG_PIPE) {
2502                         ar_pci->ce_diag = pipe->ce_hdl;
2503                         continue;
2504                 }
2505 
2506                 pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
2507         }
2508 
2509         return 0;
2510 }
2511 
2512 void ath10k_pci_free_pipes(struct ath10k *ar)
2513 {
2514         int i;
2515 
2516         for (i = 0; i < CE_COUNT; i++)
2517                 ath10k_ce_free_pipe(ar, i);
2518 }
2519 
2520 int ath10k_pci_init_pipes(struct ath10k *ar)
2521 {
2522         int i, ret;
2523 
2524         for (i = 0; i < CE_COUNT; i++) {
2525                 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
2526                 if (ret) {
2527                         ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
2528                                    i, ret);
2529                         return ret;
2530                 }
2531         }
2532 
2533         return 0;
2534 }
2535 
2536 static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
2537 {
2538         return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
2539                FW_IND_EVENT_PENDING;
2540 }
2541 
2542 static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
2543 {
2544         u32 val;
2545 
2546         val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2547         val &= ~FW_IND_EVENT_PENDING;
2548         ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
2549 }
2550 
2551 static bool ath10k_pci_has_device_gone(struct ath10k *ar)
2552 {
2553         u32 val;
2554 
2555         val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
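        /* Editor's note: reads from a PCI device that has dropped off the
         * bus complete as all-ones, hence the 0xffffffff check below.
         */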
2556         return (val == 0xffffffff);
2557 }
2558 
2559 /* this function effectively clears the target memory controller assert line */
2560 static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
2561 {
2562         u32 val;
2563 
2564         val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2565         ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2566                                val | SOC_RESET_CONTROL_SI0_RST_MASK);
2567         val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2568 
2569         msleep(10);
2570 
2571         val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2572         ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2573                                val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
2574         val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2575 
2576         msleep(10);
2577 }
2578 
2579 static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
2580 {
2581         u32 val;
2582 
2583         ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
2584 
2585         val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2586                                 SOC_RESET_CONTROL_ADDRESS);
2587         ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2588                            val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2589 }
2590 
2591 static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
2592 {
2593         u32 val;
2594 
2595         val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2596                                 SOC_RESET_CONTROL_ADDRESS);
2597 
2598         ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2599                            val | SOC_RESET_CONTROL_CE_RST_MASK);
2600         msleep(10);
2601         ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2602                            val & ~SOC_RESET_CONTROL_CE_RST_MASK);
2603 }
2604 
2605 static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
2606 {
2607         u32 val;
2608 
2609         val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2610                                 SOC_LF_TIMER_CONTROL0_ADDRESS);
2611         ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
2612                            SOC_LF_TIMER_CONTROL0_ADDRESS,
2613                            val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2614 }
2615 
2616 static int ath10k_pci_warm_reset(struct ath10k *ar)
2617 {
2618         int ret;
2619 
2620         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
2621 
2622         spin_lock_bh(&ar->data_lock);
2623         ar->stats.fw_warm_reset_counter++;
2624         spin_unlock_bh(&ar->data_lock);
2625 
2626         ath10k_pci_irq_disable(ar);
2627 
2628         /* Make sure the target CPU is not doing anything dangerous, e.g. if it
2629          * were to access the copy engine while the host performs a copy
2630          * engine reset it could confuse the PCIe controller to the point
2631          * of bringing the host system to a complete stop (i.e. hang).
2632          */
2633         ath10k_pci_warm_reset_si0(ar);
2634         ath10k_pci_warm_reset_cpu(ar);
2635         ath10k_pci_init_pipes(ar);
2636         ath10k_pci_wait_for_target_init(ar);
2637 
2638         ath10k_pci_warm_reset_clear_lf(ar);
2639         ath10k_pci_warm_reset_ce(ar);
2640         ath10k_pci_warm_reset_cpu(ar);
2641         ath10k_pci_init_pipes(ar);
2642 
2643         ret = ath10k_pci_wait_for_target_init(ar);
2644         if (ret) {
2645                 ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2646                 return ret;
2647         }
2648 
2649         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
2650 
2651         return 0;
2652 }
2653 
2654 static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
2655 {
2656         ath10k_pci_irq_disable(ar);
2657         return ath10k_pci_qca99x0_chip_reset(ar);
2658 }
2659 
2660 static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
2661 {
2662         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2663 
2664         if (!ar_pci->pci_soft_reset)
2665                 return -ENOTSUPP;
2666 
2667         return ar_pci->pci_soft_reset(ar);
2668 }
2669 
2670 static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
2671 {
2672         int i, ret;
2673         u32 val;
2674 
2675         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
2676 
2677         /* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
2678          * It is thus preferred to use warm reset, which is safer but may not be
2679          * able to recover the device from all possible failure scenarios.
2680          *
2681          * Warm reset doesn't always work on first try so attempt it a few
2682          * times before giving up.
2683          */
2684         for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2685                 ret = ath10k_pci_warm_reset(ar);
2686                 if (ret) {
2687                         ath10k_warn(ar, "failed warm reset attempt %d of %d: %d\n",
2688                                     i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2689                                     ret);
2690                         continue;
2691                 }
2692 
2693                 /* FIXME: Sometimes copy engine doesn't recover after warm
2694                  * reset. In most cases this needs cold reset. In some of these
2695                  * cases the device is in such a state that a cold reset may
2696                  * lock up the host.
2697                  *
2698                  * Reading any host interest register via copy engine is
2699                  * sufficient to verify whether the device is capable of
2700                  * booting the firmware blob.
2701                  */
2702                 ret = ath10k_pci_init_pipes(ar);
2703                 if (ret) {
2704                         ath10k_warn(ar, "failed to init copy engine: %d\n",
2705                                     ret);
2706                         continue;
2707                 }
2708 
2709                 ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2710                                              &val);
2711                 if (ret) {
2712                         ath10k_warn(ar, "failed to poke copy engine: %d\n",
2713                                     ret);
2714                         continue;
2715                 }
2716 
2717                 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2718                 return 0;
2719         }
2720 
2721         if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2722                 ath10k_warn(ar, "refusing cold reset as requested\n");
2723                 return -EPERM;
2724         }
2725 
2726         ret = ath10k_pci_cold_reset(ar);
2727         if (ret) {
2728                 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2729                 return ret;
2730         }
2731 
2732         ret = ath10k_pci_wait_for_target_init(ar);
2733         if (ret) {
2734                 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2735                             ret);
2736                 return ret;
2737         }
2738 
2739         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
2740 
2741         return 0;
2742 }
2743 
2744 static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2745 {
2746         int ret;
2747 
2748         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2749 
2750         /* FIXME: QCA6174 requires cold + warm reset to work. */
2751 
2752         ret = ath10k_pci_cold_reset(ar);
2753         if (ret) {
2754                 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2755                 return ret;
2756         }
2757 
2758         ret = ath10k_pci_wait_for_target_init(ar);
2759         if (ret) {
2760                 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2761                             ret);
2762                 return ret;
2763         }
2764 
2765         ret = ath10k_pci_warm_reset(ar);
2766         if (ret) {
2767                 ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2768                 return ret;
2769         }
2770 
2771         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold+warm)\n");
2772 
2773         return 0;
2774 }
2775 
2776 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
2777 {
2778         int ret;
2779 
2780         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
2781 
2782         ret = ath10k_pci_cold_reset(ar);
2783         if (ret) {
2784                 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2785                 return ret;
2786         }
2787 
2788         ret = ath10k_pci_wait_for_target_init(ar);
2789         if (ret) {
2790                 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2791                             ret);
2792                 return ret;
2793         }
2794 
2795         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
2796 
2797         return 0;
2798 }
2799 
2800 static int ath10k_pci_chip_reset(struct ath10k *ar)
2801 {
2802         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2803 
2804         if (WARN_ON(!ar_pci->pci_hard_reset))
2805                 return -ENOTSUPP;
2806 
2807         return ar_pci->pci_hard_reset(ar);
2808 }
2809 
2810 static int ath10k_pci_hif_power_up(struct ath10k *ar,
2811                                    enum ath10k_firmware_mode fw_mode)
2812 {
2813         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2814         int ret;
2815 
2816         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2817 
2818         pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2819                                   &ar_pci->link_ctl);
2820         pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2821                                    ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
2822 
2823         /*
2824          * Bring the target up cleanly.
2825          *
2826          * The target may be in an undefined state with an AUX-powered Target
2827          * and a Host in WoW mode. If the Host crashes, loses power, or is
2828          * restarted (without unloading the driver) then the Target is left
2829          * (aux) powered and running. On a subsequent driver load, the Target
2830          * is in an unexpected state. We try to catch that here in order to
2831          * reset the Target and retry the probe.
2832          */
2833         ret = ath10k_pci_chip_reset(ar);
2834         if (ret) {
2835                 if (ath10k_pci_has_fw_crashed(ar)) {
2836                         ath10k_warn(ar, "firmware crashed during chip reset\n");
2837                         ath10k_pci_fw_crashed_clear(ar);
2838                         ath10k_pci_fw_crashed_dump(ar);
2839                 }
2840 
2841                 ath10k_err(ar, "failed to reset chip: %d\n", ret);
2842                 goto err_sleep;
2843         }
2844 
2845         ret = ath10k_pci_init_pipes(ar);
2846         if (ret) {
2847                 ath10k_err(ar, "failed to initialize CE: %d\n", ret);
2848                 goto err_sleep;
2849         }
2850 
2851         ret = ath10k_pci_init_config(ar);
2852         if (ret) {
2853                 ath10k_err(ar, "failed to setup init config: %d\n", ret);
2854                 goto err_ce;
2855         }
2856 
2857         ret = ath10k_pci_wake_target_cpu(ar);
2858         if (ret) {
2859                 ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
2860                 goto err_ce;
2861         }
2862 
2863         return 0;
2864 
2865 err_ce:
2866         ath10k_pci_ce_deinit(ar);
2867 
2868 err_sleep:
2869         return ret;
2870 }
2871 
2872 void ath10k_pci_hif_power_down(struct ath10k *ar)
2873 {
2874         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
2875 
2876         /* Currently hif_power_up effectively performs a reset, and hif_stop
2877          * resets the chip as well, so there is no point in resetting here.
2878          */
2879 }
2880 
2881 static int ath10k_pci_hif_suspend(struct ath10k *ar)
2882 {
2883         /* Nothing to do; the important stuff is in the driver suspend. */
2884         return 0;
2885 }
2886 
2887 static int ath10k_pci_suspend(struct ath10k *ar)
2888 {
2889         /* The grace timer can still be counting down, leaving ar->ps_awake
2890          * true. The device is known to be asleep after resume regardless of
2891          * the SoC powersave state before suspending, so make sure the device
2892          * is asleep before proceeding.
2893          */
2894         ath10k_pci_sleep_sync(ar);
2895 
2896         return 0;
2897 }
2898 
2899 static int ath10k_pci_hif_resume(struct ath10k *ar)
2900 {
2901         /* Nothing to do; the important stuff is in the driver resume. */
2902         return 0;
2903 }
2904 
2905 static int ath10k_pci_resume(struct ath10k *ar)
2906 {
2907         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2908         struct pci_dev *pdev = ar_pci->pdev;
2909         u32 val;
2910         int ret = 0;
2911 
2912         ret = ath10k_pci_force_wake(ar);
2913         if (ret) {
2914                 ath10k_err(ar, "failed to wake up target: %d\n", ret);
2915                 return ret;
2916         }
2917 
2918         /* Suspend/Resume resets the PCI configuration space, so we have to
2919          * re-disable the RETRY_TIMEOUT byte (offset 0x41, bits 15:8 of the
2920          * dword at 0x40, hence the 0xff00 mask) to keep PCI Tx retries from
2921          * interfering with C3 CPU state. pci_restore_state() won't help, as
2922          * it only restores the first 64 bytes of the PCI config header. */
2923         pci_read_config_dword(pdev, 0x40, &val);
2924         if ((val & 0x0000ff00) != 0)
2925                 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2926 
2927         return ret;
2928 }
2929 
2930 static bool ath10k_pci_validate_cal(void *data, size_t size)
2931 {
2932         __le16 *cal_words = data;
2933         u16 checksum = 0;
2934         size_t i;
2935 
2936         if (size % 2 != 0)
2937                 return false;
2938 
2939         for (i = 0; i < size / 2; i++)
2940                 checksum ^= le16_to_cpu(cal_words[i]);
2941 
2942         return checksum == 0xffff;
2943 }
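/* Editor's note: a minimal sketch (not driver API) of how a generator could
 * pick the final pad word of a calibration blob so that the XOR of all
 * little-endian 16-bit words equals 0xffff and the blob passes
 * ath10k_pci_validate_cal() above; ath10k_cal_seal() is hypothetical.
 */
static void __maybe_unused ath10k_cal_seal(__le16 *cal_words, size_t n_words)
{
        u16 checksum = 0;
        size_t i;

        /* XOR all words except the last... */
        for (i = 0; i < n_words - 1; i++)
                checksum ^= le16_to_cpu(cal_words[i]);

        /* ...then choose the last word so the running XOR becomes 0xffff */
        cal_words[n_words - 1] = cpu_to_le16(checksum ^ 0xffff);
}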
2944 
2945 static void ath10k_pci_enable_eeprom(struct ath10k *ar)
2946 {
2947         /* Enable SI clock */
2948         ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
2949 
2950         /* Configure GPIOs for I2C operation */
2951         ath10k_pci_write32(ar,
2952                            GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2953                            4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
2954                            SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
2955                               GPIO_PIN0_CONFIG) |
2956                            SM(1, GPIO_PIN0_PAD_PULL));
2957 
2958         ath10k_pci_write32(ar,
2959                            GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2960                            4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
2961                            SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
2962                            SM(1, GPIO_PIN0_PAD_PULL));
2963 
2964         ath10k_pci_write32(ar,
2965                            GPIO_BASE_ADDRESS +
2966                            QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
2967                            1u << QCA9887_1_0_SI_CLK_GPIO_PIN);
2968 
2969         /* On the Swift ASIC the EEPROM clock will be 110 MHz / 512 = ~215 kHz */
2970         ath10k_pci_write32(ar,
2971                            SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
2972                            SM(1, SI_CONFIG_ERR_INT) |
2973                            SM(1, SI_CONFIG_BIDIR_OD_DATA) |
2974                            SM(1, SI_CONFIG_I2C) |
2975                            SM(1, SI_CONFIG_POS_SAMPLE) |
2976                            SM(1, SI_CONFIG_INACTIVE_DATA) |
2977                            SM(1, SI_CONFIG_INACTIVE_CLK) |
2978                            SM(8, SI_CONFIG_DIVIDER));
2979 }
2980 
2981 static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
2982 {
2983         u32 reg;
2984         int wait_limit;
2985 
2986         /* set the device select byte and address for the read operation */
2987         reg = QCA9887_EEPROM_SELECT_READ |
2988               SM(addr, QCA9887_EEPROM_ADDR_LO) |
2989               SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
2990         ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
2991 
2992         /* write transmit data, transfer length, and START bit */
2993         ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
2994                            SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
2995                            SM(4, SI_CS_TX_CNT));
2996 
2997         /* wait up to 1 s (100000 iterations * 10 us) */
2998         wait_limit = 100000;
2999 
3000         /* wait for SI_CS_DONE_INT */
3001         do {
3002                 reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
3003                 if (MS(reg, SI_CS_DONE_INT))
3004                         break;
3005 
3006                 wait_limit--;
3007                 udelay(10);
3008         } while (wait_limit > 0);
3009 
3010         if (!MS(reg, SI_CS_DONE_INT)) {
3011                 ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
3012                            addr);
3013                 return -ETIMEDOUT;
3014         }
3015 
3016         /* clear SI_CS_DONE_INT */
3017         ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
3018 
3019         if (MS(reg, SI_CS_DONE_ERR)) {
3020                 ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
3021                 return -EIO;
3022         }
3023 
3024         /* extract receive data */
3025         reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
3026         *out = reg;
3027 
3028         return 0;
3029 }
3030 
3031 static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
3032                                            size_t *data_len)
3033 {
3034         u8 *caldata = NULL;
3035         size_t calsize, i;
3036         int ret;
3037 
3038         if (!QCA_REV_9887(ar))
3039                 return -EOPNOTSUPP;
3040 
3041         calsize = ar->hw_params.cal_data_len;
3042         caldata = kmalloc(calsize, GFP_KERNEL);
3043         if (!caldata)
3044                 return -ENOMEM;
3045 
3046         ath10k_pci_enable_eeprom(ar);
3047 
3048         for (i = 0; i < calsize; i++) {
3049                 ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
3050                 if (ret)
3051                         goto err_free;
3052         }
3053 
3054         if (!ath10k_pci_validate_cal(caldata, calsize)) {
3055                 ret = -EINVAL;
3056                 goto err_free;
3057         }
3058         *data = caldata;
3059         *data_len = calsize;
3060         return 0;
3061 
3062 err_free:
3063         kfree(caldata);
3064 
3065         return ret;
3066 }
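/* Editor's note: a hedged usage sketch of the hif op above; the calling
 * context (ath10k_try_eeprom_cal) is illustrative, not driver API. The
 * caller owns the returned buffer and must kfree() it.
 */
static int __maybe_unused ath10k_try_eeprom_cal(struct ath10k *ar)
{
        void *cal;
        size_t cal_len;
        int ret;

        ret = ath10k_pci_hif_fetch_cal_eeprom(ar, &cal, &cal_len);
        if (ret)
                return ret;

        /* ... hand cal/cal_len to the firmware/board-data loader ... */

        kfree(cal);
        return 0;
}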
3067 
3068 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
3069         .tx_sg                  = ath10k_pci_hif_tx_sg,
3070         .diag_read              = ath10k_pci_hif_diag_read,
3071         .diag_write             = ath10k_pci_diag_write_mem,
3072         .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
3073         .start                  = ath10k_pci_hif_start,
3074         .stop                   = ath10k_pci_hif_stop,
3075         .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
3076         .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
3077         .send_complete_check    = ath10k_pci_hif_send_complete_check,
3078         .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
3079         .power_up               = ath10k_pci_hif_power_up,
3080         .power_down             = ath10k_pci_hif_power_down,
3081         .read32                 = ath10k_pci_read32,
3082         .write32                = ath10k_pci_write32,
3083         .suspend                = ath10k_pci_hif_suspend,
3084         .resume                 = ath10k_pci_hif_resume,
3085         .fetch_cal_eeprom       = ath10k_pci_hif_fetch_cal_eeprom,
3086 };
3087 
3088 /*
3089  * Top-level interrupt handler for all PCI interrupts from a Target.
3090  * Both the legacy and the MSI paths register this handler; it wakes the
3091  * device, masks further interrupts and defers all processing to NAPI.
3092  */
3093 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
3094 {
3095         struct ath10k *ar = arg;
3096         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3097         int ret;
3098 
3099         if (ath10k_pci_has_device_gone(ar))
3100                 return IRQ_NONE;
3101 
3102         ret = ath10k_pci_force_wake(ar);
3103         if (ret) {
3104                 ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
3105                 return IRQ_NONE;
3106         }
3107 
3108         if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
3109             !ath10k_pci_irq_pending(ar))
3110                 return IRQ_NONE;
3111 
3112         ath10k_pci_disable_and_clear_legacy_irq(ar);
3113         ath10k_pci_irq_msi_fw_mask(ar);
3114         napi_schedule(&ar->napi);
3115 
3116         return IRQ_HANDLED;
3117 }
3118 
3119 static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
3120 {
3121         struct ath10k *ar = container_of(ctx, struct ath10k, napi);
3122         int done = 0;
3123 
3124         if (ath10k_pci_has_fw_crashed(ar)) {
3125                 ath10k_pci_fw_crashed_clear(ar);
3126                 ath10k_pci_fw_crashed_dump(ar);
3127                 napi_complete(ctx);
3128                 return done;
3129         }
3130 
3131         ath10k_ce_per_engine_service_any(ar);
3132 
3133         done = ath10k_htt_txrx_compl_task(ar, budget);
3134 
3135         if (done < budget) {
3136                 napi_complete_done(ctx, done);
3137                 /* In case of MSI, interrupts may arrive while the NAPI poll
3138                  * is in progress; any interrupt received after all copy engine
3139                  * pipes have been processed would otherwise never be handled
3140                  * again, which has been seen to stall the boot sequence on x86
3141                  * platforms. So before re-enabling interrupts it is safer to
3142                  * check for pending interrupts and service them immediately by
3143                  * rescheduling NAPI.
3144                  */
3145                 if (ath10k_ce_interrupt_summary(ar)) {
3146                         napi_reschedule(ctx);
3147                         goto out;
3148                 }
3149                 ath10k_pci_enable_legacy_irq(ar);
3150                 ath10k_pci_irq_msi_fw_unmask(ar);
3151         }
3152 
3153 out:
3154         return done;
3155 }
3156 
3157 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
3158 {
3159         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3160         int ret;
3161 
3162         ret = request_irq(ar_pci->pdev->irq,
3163                           ath10k_pci_interrupt_handler,
3164                           IRQF_SHARED, "ath10k_pci", ar);
3165         if (ret) {
3166                 ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
3167                             ar_pci->pdev->irq, ret);
3168                 return ret;
3169         }
3170 
3171         return 0;
3172 }
3173 
3174 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
3175 {
3176         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3177         int ret;
3178 
3179         ret = request_irq(ar_pci->pdev->irq,
3180                           ath10k_pci_interrupt_handler,
3181                           IRQF_SHARED, "ath10k_pci", ar);
3182         if (ret) {
3183                 ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
3184                             ar_pci->pdev->irq, ret);
3185                 return ret;
3186         }
3187 
3188         return 0;
3189 }
3190 
3191 static int ath10k_pci_request_irq(struct ath10k *ar)
3192 {
3193         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3194 
3195         switch (ar_pci->oper_irq_mode) {
3196         case ATH10K_PCI_IRQ_LEGACY:
3197                 return ath10k_pci_request_irq_legacy(ar);
3198         case ATH10K_PCI_IRQ_MSI:
3199                 return ath10k_pci_request_irq_msi(ar);
3200         default:
3201                 return -EINVAL;
3202         }
3203 }
3204 
3205 static void ath10k_pci_free_irq(struct ath10k *ar)
3206 {
3207         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3208 
3209         free_irq(ar_pci->pdev->irq, ar);
3210 }
3211 
3212 void ath10k_pci_init_napi(struct ath10k *ar)
3213 {
3214         netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
3215                        ATH10K_NAPI_BUDGET);
3216 }
3217 
3218 static int ath10k_pci_init_irq(struct ath10k *ar)
3219 {
3220         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3221         int ret;
3222 
3223         ath10k_pci_init_napi(ar);
3224 
3225         if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
3226                 ath10k_info(ar, "limiting irq mode to: %d\n",
3227                             ath10k_pci_irq_mode);
3228 
3229         /* Try MSI */
3230         if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
3231                 ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
3232                 ret = pci_enable_msi(ar_pci->pdev);
3233                 if (ret == 0)
3234                         return 0;
3235 
3236                 /* fall-through */
3237         }
3238 
3239         /* Try legacy irq
3240          *
3241          * A potential race occurs here: the CORE_BASE write depends on
3242          * the target correctly decoding the AXI address, but the host
3243          * cannot know when the target has written its BAR to CORE_CTRL,
3244          * so this write may be lost if the target has not yet done so.
3245          * For now, work around the race by repeating the write during
3246          * the synchronization check in ath10k_pci_wait_for_target_init().
3247          */
3248         ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
3249 
3250         ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
3251                            PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3252 
3253         return 0;
3254 }
3255 
3256 static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
3257 {
3258         ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
3259                            0);
3260 }
3261 
3262 static int ath10k_pci_deinit_irq(struct ath10k *ar)
3263 {
3264         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3265 
3266         switch (ar_pci->oper_irq_mode) {
3267         case ATH10K_PCI_IRQ_LEGACY:
3268                 ath10k_pci_deinit_irq_legacy(ar);
3269                 break;
3270         default:
3271                 pci_disable_msi(ar_pci->pdev);
3272                 break;
3273         }
3274 
3275         return 0;
3276 }
3277 
3278 int ath10k_pci_wait_for_target_init(struct ath10k *ar)
3279 {
3280         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3281         unsigned long timeout;
3282         u32 val;
3283 
3284         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
3285 
3286         timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
3287 
3288         do {
3289                 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
3290 
3291                 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
3292                            val);
3293 
3294                 /* target should never return this */
3295                 if (val == 0xffffffff)
3296                         continue;
3297 
3298                 /* the device has crashed so don't bother trying anymore */
3299                 if (val & FW_IND_EVENT_PENDING)
3300                         break;
3301 
3302                 if (val & FW_IND_INITIALIZED)
3303                         break;
3304 
3305                 if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
3306                         /* Fix potential race by repeating CORE_BASE writes */
3307                         ath10k_pci_enable_legacy_irq(ar);
3308 
3309                 mdelay(10);
3310         } while (time_before(jiffies, timeout));
3311 
3312         ath10k_pci_disable_and_clear_legacy_irq(ar);
3313         ath10k_pci_irq_msi_fw_mask(ar);
3314 
3315         if (val == 0xffffffff) {
3316                 ath10k_err(ar, "failed to read device register, device is gone\n");
3317                 return -EIO;
3318         }
3319 
3320         if (val & FW_IND_EVENT_PENDING) {
3321                 ath10k_warn(ar, "device has crashed during init\n");
3322                 return -ECOMM;
3323         }
3324 
3325         if (!(val & FW_IND_INITIALIZED)) {
3326                 ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
3327                            val);
3328                 return -ETIMEDOUT;
3329         }
3330 
3331         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
3332         return 0;
3333 }
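/* Editor's note: a simplified sketch of the same bounded poll using the
 * generic read_poll_timeout() helper from <linux/iopoll.h>, on kernels that
 * provide it. It sleeps instead of mdelay()ing and drops the legacy-IRQ
 * re-enable that the loop above repeats each iteration; the caller would
 * still have to inspect *val to tell the failure modes apart.
 */
static int __maybe_unused ath10k_pci_poll_fw_indicator(struct ath10k *ar,
                                                       u32 *val)
{
        return read_poll_timeout(ath10k_pci_read32, *val,
                                 *val != 0xffffffff &&
                                 (*val & (FW_IND_EVENT_PENDING |
                                          FW_IND_INITIALIZED)),
                                 10 * USEC_PER_MSEC,
                                 ATH10K_PCI_TARGET_WAIT * USEC_PER_MSEC,
                                 false, ar, FW_INDICATOR_ADDRESS);
}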
3334 
3335 static int ath10k_pci_cold_reset(struct ath10k *ar)
3336 {
3337         u32 val;
3338 
3339         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
3340 
3341         spin_lock_bh(&ar->data_lock);
3342 
3343         ar->stats.fw_cold_reset_counter++;
3344 
3345         spin_unlock_bh(&ar->data_lock);
3346 
3347         /* Put Target, including PCIe, into RESET. */
3348         val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
3349         val |= 1;
3350         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3351 
3352         /* After writing SOC_GLOBAL_RESET to put the device into reset
3353          * (and again to pull it out), PCIe may not yet be stable, and an
3354          * immediate PCIe register access can cause a bus error. Delay
3355          * before any PCIe access to avoid this.
3356          */
3357         msleep(20);
3358 
3359         /* Pull Target, including PCIe, out of RESET. */
3360         val &= ~1;
3361         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3362 
3363         msleep(20);
3364 
3365         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
3366 
3367         return 0;
3368 }
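/* Editor's note: a behaviour-preserving sketch of the reset toggle above
 * using a named mask; SOC_GLOBAL_RESET_BIT is illustrative and not a
 * definition from hw.h.
 */
#define SOC_GLOBAL_RESET_BIT	BIT(0)

static void __maybe_unused ath10k_pci_soc_reset_set(struct ath10k *ar,
                                                    bool assert)
{
        u32 val;

        val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
        if (assert)
                val |= SOC_GLOBAL_RESET_BIT;
        else
                val &= ~SOC_GLOBAL_RESET_BIT;
        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
}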
3369 
3370 static int ath10k_pci_claim(struct ath10k *ar)
3371 {
3372         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3373         struct pci_dev *pdev = ar_pci->pdev;
3374         int ret;
3375 
3376         pci_set_drvdata(pdev, ar);
3377 
3378         ret = pci_enable_device(pdev);
3379         if (ret) {
3380                 ath10k_err(ar, "failed to enable pci device: %d\n", ret);
3381                 return ret;
3382         }
3383 
3384         ret = pci_request_region(pdev, BAR_NUM, "ath");
3385         if (ret) {
3386                 ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
3387                            ret);
3388                 goto err_device;
3389         }
3390 
3391         /* Target expects 32 bit DMA. Enforce it. */
3392         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3393         if (ret) {
3394                 ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
3395                 goto err_region;
3396         }
3397 
3398         ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3399         if (ret) {
3400                 ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
3401                            ret);
3402                 goto err_region;
3403         }
3404 
3405         pci_set_master(pdev);
3406 
3407         /* Arrange for access to Target SoC registers. */
3408         ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
3409         ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
3410         if (!ar_pci->mem) {
3411                 ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
3412                 ret = -EIO;
3413                 goto err_master;
3414         }
3415 
3416         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
3417         return 0;
3418 
3419 err_master:
3420         pci_clear_master(pdev);
3421 
3422 err_region:
3423         pci_release_region(pdev, BAR_NUM);
3424 
3425 err_device:
3426         pci_disable_device(pdev);
3427 
3428         return ret;
3429 }
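/* Editor's note: pci_set_dma_mask()/pci_set_consistent_dma_mask() above
 * were later removed in favour of the generic DMA API; on current kernels
 * the equivalent single call would be (a sketch, same 32-bit constraint):
 *
 *	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		ath10k_err(ar, "failed to set 32-bit dma mask: %d\n", ret);
 */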
3430 
3431 static void ath10k_pci_release(struct ath10k *ar)
3432 {
3433         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3434         struct pci_dev *pdev = ar_pci->pdev;
3435 
3436         pci_iounmap(pdev, ar_pci->mem);
3437         pci_release_region(pdev, BAR_NUM);
3438         pci_clear_master(pdev);
3439         pci_disable_device(pdev);
3440 }
3441 
3442 static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
3443 {
3444         const struct ath10k_pci_supp_chip *supp_chip;
3445         int i;
3446         u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
3447 
3448         for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
3449                 supp_chip = &ath10k_pci_supp_chips[i];
3450 
3451                 if (supp_chip->dev_id == dev_id &&
3452                     supp_chip->rev_id == rev_id)
3453                         return true;
3454         }
3455 
3456         return false;
3457 }
3458 
3459 int ath10k_pci_setup_resource(struct ath10k *ar)
3460 {
3461         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3462         struct ath10k_ce *ce = ath10k_ce_priv(ar);
3463         int ret;
3464 
3465         spin_lock_init(&ce->ce_lock);
3466         spin_lock_init(&ar_pci->ps_lock);
3467         mutex_init(&ar_pci->ce_diag_mutex);
3468 
3469         INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);
3470 
3471         timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
3472 
3473         if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
3474                 ath10k_pci_override_ce_config(ar);
3475 
3476         ret = ath10k_pci_alloc_pipes(ar);
3477         if (ret) {
3478                 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
3479                            ret);
3480                 return ret;
3481         }
3482 
3483         return 0;
3484 }
3485 
3486 void ath10k_pci_release_resource(struct ath10k *ar)
3487 {
3488         ath10k_pci_rx_retry_sync(ar);
3489         netif_napi_del(&ar->napi);
3490         ath10k_pci_ce_deinit(ar);
3491         ath10k_pci_free_pipes(ar);
3492 }
3493 
3494 static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
3495         .read32         = ath10k_bus_pci_read32,
3496         .write32        = ath10k_bus_pci_write32,
3497         .get_num_banks  = ath10k_pci_get_num_banks,
3498 };
3499 
3500 static int ath10k_pci_probe(struct pci_dev *pdev,
3501                             const struct pci_device_id *pci_dev)
3502 {
3503         int ret = 0;
3504         struct ath10k *ar;
3505         struct ath10k_pci *ar_pci;
3506         enum ath10k_hw_rev hw_rev;
3507         struct ath10k_bus_params bus_params = {};
3508         bool pci_ps, is_qca988x = false;
3509         int (*pci_soft_reset)(struct ath10k *ar);
3510         int (*pci_hard_reset)(struct ath10k *ar);
3511         u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
3512 
3513         switch (pci_dev->device) {
3514         case QCA988X_2_0_DEVICE_ID_UBNT:
3515         case QCA988X_2_0_DEVICE_ID:
3516                 hw_rev = ATH10K_HW_QCA988X;
3517                 pci_ps = false;
3518                 is_qca988x = true;
3519                 pci_soft_reset = ath10k_pci_warm_reset;
3520                 pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3521                 targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
3522                 break;
3523         case QCA9887_1_0_DEVICE_ID:
3524                 hw_rev = ATH10K_HW_QCA9887;
3525                 pci_ps = false;
3526                 pci_soft_reset = ath10k_pci_warm_reset;
3527                 pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3528                 targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
3529                 break;
3530         case QCA6164_2_1_DEVICE_ID:
3531         case QCA6174_2_1_DEVICE_ID:
3532                 hw_rev = ATH10K_HW_QCA6174;
3533                 pci_ps = true;
3534                 pci_soft_reset = ath10k_pci_warm_reset;
3535                 pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3536                 targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
3537                 break;
3538         case QCA99X0_2_0_DEVICE_ID:
3539                 hw_rev = ATH10K_HW_QCA99X0;
3540                 pci_ps = false;
3541                 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3542                 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3543                 targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3544                 break;
3545         case QCA9984_1_0_DEVICE_ID:
3546                 hw_rev = ATH10K_HW_QCA9984;
3547                 pci_ps = false;
3548                 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3549                 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3550                 targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3551                 break;
3552         case QCA9888_2_0_DEVICE_ID:
3553                 hw_rev = ATH10K_HW_QCA9888;
3554                 pci_ps = false;
3555                 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3556                 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3557                 targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3558                 break;
3559         case QCA9377_1_0_DEVICE_ID:
3560                 hw_rev = ATH10K_HW_QCA9377;
3561                 pci_ps = true;
3562                 pci_soft_reset = ath10k_pci_warm_reset;
3563                 pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3564                 targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
3565                 break;
3566         default:
3567                 WARN_ON(1);
3568                 return -ENOTSUPP;
3569         }
3570 
3571         ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
3572                                 hw_rev, &ath10k_pci_hif_ops);
3573         if (!ar) {
3574                 dev_err(&pdev->dev, "failed to allocate core\n");
3575                 return -ENOMEM;
3576         }
3577 
3578         ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
3579                    pdev->vendor, pdev->device,
3580                    pdev->subsystem_vendor, pdev->subsystem_device);
3581 
3582         ar_pci = ath10k_pci_priv(ar);
3583         ar_pci->pdev = pdev;
3584         ar_pci->dev = &pdev->dev;
3585         ar_pci->ar = ar;
3586         ar->dev_id = pci_dev->device;
3587         ar_pci->pci_ps = pci_ps;
3588         ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
3589         ar_pci->pci_soft_reset = pci_soft_reset;
3590         ar_pci->pci_hard_reset = pci_hard_reset;
3591         ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
3592         ar->ce_priv = &ar_pci->ce;
3593 
3594         ar->id.vendor = pdev->vendor;
3595         ar->id.device = pdev->device;
3596         ar->id.subsystem_vendor = pdev->subsystem_vendor;
3597         ar->id.subsystem_device = pdev->subsystem_device;
3598 
3599         timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
3600 
3601         ret = ath10k_pci_setup_resource(ar);
3602         if (ret) {
3603                 ath10k_err(ar, "failed to setup resource: %d\n", ret);
3604                 goto err_core_destroy;
3605         }
3606 
3607         ret = ath10k_pci_claim(ar);
3608         if (ret) {
3609                 ath10k_err(ar, "failed to claim device: %d\n", ret);
3610                 goto err_free_pipes;
3611         }
3612 
3613         ret = ath10k_pci_force_wake(ar);
3614         if (ret) {
3615                 ath10k_warn(ar, "failed to wake up device: %d\n", ret);
3616                 goto err_sleep;
3617         }
3618 
3619         ath10k_pci_ce_deinit(ar);
3620         ath10k_pci_irq_disable(ar);
3621 
3622         ret = ath10k_pci_init_irq(ar);
3623         if (ret) {
3624                 ath10k_err(ar, "failed to init irqs: %d\n", ret);
3625                 goto err_sleep;
3626         }
3627 
3628         ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
3629                     ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
3630                     ath10k_pci_irq_mode, ath10k_pci_reset_mode);
3631 
3632         ret = ath10k_pci_request_irq(ar);
3633         if (ret) {
3634                 ath10k_warn(ar, "failed to request irqs: %d\n", ret);
3635                 goto err_deinit_irq;
3636         }
3637 
3638         bus_params.dev_type = ATH10K_DEV_TYPE_LL;
3639         bus_params.link_can_suspend = true;
3640         /* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that
3641          * fall off the bus during chip_reset. These chips share a PCI
3642          * device id with the QCA9880 BR4A and 2R4E, hence the check.
3643          */
3644         if (is_qca988x) {
3645                 bus_params.chip_id =
3646                         ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3647                 if (bus_params.chip_id != 0xffffffff) {
3648                         if (!ath10k_pci_chip_is_supported(pdev->device,
3649                                                           bus_params.chip_id))
3650                                 goto err_unsupported;
3651                 }
3652         }
3653 
3654         ret = ath10k_pci_chip_reset(ar);
3655         if (ret) {
3656                 ath10k_err(ar, "failed to reset chip: %d\n", ret);
3657                 goto err_free_irq;
3658         }
3659 
3660         bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3661         if (bus_params.chip_id == 0xffffffff)
3662                 goto err_unsupported;
3663 
3664         if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id))
3665                 goto err_free_irq;
3666 
3667         ret = ath10k_core_register(ar, &bus_params);
3668         if (ret) {
3669                 ath10k_err(ar, "failed to register driver core: %d\n", ret);
3670                 goto err_free_irq;
3671         }
3672 
3673         return 0;
3674 
3675 err_unsupported:
3676         ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
3677                    pdev->device, bus_params.chip_id);
3678 
3679 err_free_irq:
3680         ath10k_pci_free_irq(ar);
3681         ath10k_pci_rx_retry_sync(ar);
3682 
3683 err_deinit_irq:
3684         ath10k_pci_deinit_irq(ar);
3685 
3686 err_sleep:
3687         ath10k_pci_sleep_sync(ar);
3688         ath10k_pci_release(ar);
3689 
3690 err_free_pipes:
3691         ath10k_pci_free_pipes(ar);
3692 
3693 err_core_destroy:
3694         ath10k_core_destroy(ar);
3695 
3696         return ret;
3697 }
3698 
3699 static void ath10k_pci_remove(struct pci_dev *pdev)
3700 {
3701         struct ath10k *ar = pci_get_drvdata(pdev);
3702         struct ath10k_pci *ar_pci;
3703 
3704         ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
3705 
3706         if (!ar)
3707                 return;
3708 
3709         ar_pci = ath10k_pci_priv(ar);
3710 
3711         if (!ar_pci)
3712                 return;
3713 
3714         ath10k_core_unregister(ar);
3715         ath10k_pci_free_irq(ar);
3716         ath10k_pci_deinit_irq(ar);
3717         ath10k_pci_release_resource(ar);
3718         ath10k_pci_sleep_sync(ar);
3719         ath10k_pci_release(ar);
3720         ath10k_core_destroy(ar);
3721 }
3722 
3723 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
3724 
3725 static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
3726 {
3727         struct ath10k *ar = dev_get_drvdata(dev);
3728         int ret;
3729 
3730         ret = ath10k_pci_suspend(ar);
3731         if (ret)
3732                 ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
3733 
3734         return ret;
3735 }
3736 
3737 static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
3738 {
3739         struct ath10k *ar = dev_get_drvdata(dev);
3740         int ret;
3741 
3742         ret = ath10k_pci_resume(ar);
3743         if (ret)
3744                 ath10k_warn(ar, "failed to resume hif: %d\n", ret);
3745 
3746         return ret;
3747 }
3748 
3749 static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
3750                          ath10k_pci_pm_suspend,
3751                          ath10k_pci_pm_resume);
3752 
3753 static struct pci_driver ath10k_pci_driver = {
3754         .name = "ath10k_pci",
3755         .id_table = ath10k_pci_id_table,
3756         .probe = ath10k_pci_probe,
3757         .remove = ath10k_pci_remove,
3758 #ifdef CONFIG_PM
3759         .driver.pm = &ath10k_pci_pm_ops,
3760 #endif
3761 };
3762 
3763 static int __init ath10k_pci_init(void)
3764 {
3765         int ret;
3766 
3767         ret = pci_register_driver(&ath10k_pci_driver);
3768         if (ret)
3769                 printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
3770                        ret);
3771 
3772         ret = ath10k_ahb_init();
3773         if (ret)
3774                 printk(KERN_ERR "ahb init failed: %d\n", ret);
3775 
3776         return ret;
3777 }
3778 module_init(ath10k_pci_init);
3779 
3780 static void __exit ath10k_pci_exit(void)
3781 {
3782         pci_unregister_driver(&ath10k_pci_driver);
3783         ath10k_ahb_exit();
3784 }
3785 
3786 module_exit(ath10k_pci_exit);
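/* Editor's note: a PCI-only module could collapse the init/exit pair above
 * into the module_pci_driver() helper, e.g.:
 *
 *	module_pci_driver(ath10k_pci_driver);
 *
 * That shortcut does not apply here because module init must also register
 * the AHB glue via ath10k_ahb_init().
 */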
3787 
3788 MODULE_AUTHOR("Qualcomm Atheros");
3789 MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
3790 MODULE_LICENSE("Dual BSD/GPL");
3791 
3792 /* QCA988x 2.0 firmware files */
3793 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
3794 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
3795 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3796 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3797 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
3798 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3799 
3800 /* QCA9887 1.0 firmware files */
3801 MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3802 MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
3803 MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3804 
3805 /* QCA6174 2.1 firmware files */
3806 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
3807 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
3808 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
3809 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3810 
3811 /* QCA6174 3.1 firmware files */
3812 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3813 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3814 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
3815 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
3816 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3817 
3818 /* QCA9377 1.0 firmware files */
3819 MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE);
3820 MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3821 MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);
