This source file includes the following definitions:
- ath10k_sdio_calc_txrx_padded_len
- pipe_id_to_eid
- ath10k_sdio_mbox_free_rx_pkt
- ath10k_sdio_mbox_alloc_rx_pkt
- is_trailer_only_msg
- ath10k_sdio_set_cmd52_arg
- ath10k_sdio_func0_cmd52_wr_byte
- ath10k_sdio_func0_cmd52_rd_byte
- ath10k_sdio_config
- ath10k_sdio_write32
- ath10k_sdio_writesb32
- ath10k_sdio_read32
- ath10k_sdio_read
- ath10k_sdio_write
- ath10k_sdio_readsb
- ath10k_sdio_mbox_rx_process_packet
- ath10k_sdio_mbox_rx_process_packets
- ath10k_sdio_mbox_alloc_pkt_bundle
- ath10k_sdio_mbox_rx_alloc
- ath10k_sdio_mbox_rx_packet
- ath10k_sdio_mbox_rx_fetch
- ath10k_sdio_mbox_rxmsg_pending_handler
- ath10k_sdio_mbox_proc_dbg_intr
- ath10k_sdio_mbox_proc_counter_intr
- ath10k_sdio_mbox_proc_err_intr
- ath10k_sdio_mbox_proc_cpu_intr
- ath10k_sdio_mbox_read_int_status
- ath10k_sdio_mbox_proc_pending_irqs
- ath10k_sdio_set_mbox_info
- ath10k_sdio_bmi_credits
- ath10k_sdio_bmi_get_rx_lookahead
- ath10k_sdio_bmi_exchange_msg
- ath10k_sdio_alloc_busreq
- ath10k_sdio_free_bus_req
- __ath10k_sdio_write_async
- ath10k_sdio_write_async_work
- ath10k_sdio_prep_async_req
- ath10k_sdio_irq_handler
- ath10k_sdio_hif_disable_intrs
- ath10k_sdio_hif_power_up
- ath10k_sdio_hif_power_down
- ath10k_sdio_hif_tx_sg
- ath10k_sdio_hif_enable_intrs
- ath10k_sdio_hif_set_mbox_sleep
- ath10k_sdio_hif_diag_read
- ath10k_sdio_hif_diag_read32
- ath10k_sdio_hif_diag_write_mem
- ath10k_sdio_hif_swap_mailbox
- ath10k_sdio_hif_start
- ath10k_sdio_irq_disable
- ath10k_sdio_hif_stop
- ath10k_sdio_hif_suspend
- ath10k_sdio_hif_resume
- ath10k_sdio_hif_map_service_to_pipe
- ath10k_sdio_hif_get_default_pipe
- ath10k_sdio_hif_send_complete_check
- ath10k_sdio_pm_suspend
- ath10k_sdio_pm_resume
- ath10k_sdio_probe
- ath10k_sdio_remove
- ath10k_sdio_init
- ath10k_sdio_exit
   1 
   2 
   3 
   4 
   5 
   6 
   7 
   8 #include <linux/module.h>
   9 #include <linux/mmc/card.h>
  10 #include <linux/mmc/mmc.h>
  11 #include <linux/mmc/host.h>
  12 #include <linux/mmc/sdio_func.h>
  13 #include <linux/mmc/sdio_ids.h>
  14 #include <linux/mmc/sdio.h>
  15 #include <linux/mmc/sd.h>
  16 #include <linux/bitfield.h>
  17 #include "core.h"
  18 #include "bmi.h"
  19 #include "debug.h"
  20 #include "hif.h"
  21 #include "htc.h"
  22 #include "mac.h"
  23 #include "targaddrs.h"
  24 #include "trace.h"
  25 #include "sdio.h"
  26 
  27 
  28 
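      /* Round a transfer length up to the next multiple of the mailbox
       * block size (block_mask is block_size - 1); e.g. with a 256-byte
       * block size a 260-byte message is padded to 512 bytes.
       */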
  29 static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
  30                                                    size_t len)
  31 {
  32         return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask);
  33 }
  34 
  35 static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id)
  36 {
  37         return (enum ath10k_htc_ep_id)pipe_id;
  38 }
  39 
  40 static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)
  41 {
  42         dev_kfree_skb(pkt->skb);
  43         pkt->skb = NULL;
  44         pkt->alloc_len = 0;
  45         pkt->act_len = 0;
  46         pkt->trailer_only = false;
  47 }
  48 
  49 static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,
  50                                                 size_t act_len, size_t full_len,
  51                                                 bool part_of_bundle,
  52                                                 bool last_in_bundle)
  53 {
  54         pkt->skb = dev_alloc_skb(full_len);
  55         if (!pkt->skb)
  56                 return -ENOMEM;
  57 
  58         pkt->act_len = act_len;
  59         pkt->alloc_len = full_len;
  60         pkt->part_of_bundle = part_of_bundle;
  61         pkt->last_in_bundle = last_in_bundle;
  62         pkt->trailer_only = false;
  63 
  64         return 0;
  65 }
  66 
  67 static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)
  68 {
  69         bool trailer_only = false;
  70         struct ath10k_htc_hdr *htc_hdr =
  71                 (struct ath10k_htc_hdr *)pkt->skb->data;
  72         u16 len = __le16_to_cpu(htc_hdr->len);
  73 
  74         if (len == htc_hdr->trailer_len)
  75                 trailer_only = true;
  76 
  77         return trailer_only;
  78 }
  79 
  80 
  81 
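      /* Pack the 32-bit argument for an SDIO CMD52 (SD_IO_RW_DIRECT)
       * transfer: write flag in bit 31, RAW flag in bit 27, register
       * address in bits 25:9 and the data byte in bits 7:0 (bits 26 and
       * 8 are always set by this helper).
       */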
  82 static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
  83                                              unsigned int address,
  84                                              unsigned char val)
  85 {
  86         *arg = FIELD_PREP(BIT(31), write) |
  87                FIELD_PREP(BIT(27), raw) |
  88                FIELD_PREP(BIT(26), 1) |
  89                FIELD_PREP(GENMASK(25, 9), address) |
  90                FIELD_PREP(BIT(8), 1) |
  91                FIELD_PREP(GENMASK(7, 0), val);
  92 }
  93 
  94 static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
  95                                            unsigned int address,
  96                                            unsigned char byte)
  97 {
  98         struct mmc_command io_cmd;
  99 
 100         memset(&io_cmd, 0, sizeof(io_cmd));
 101         ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
 102         io_cmd.opcode = SD_IO_RW_DIRECT;
 103         io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
 104 
 105         return mmc_wait_for_cmd(card->host, &io_cmd, 0);
 106 }
 107 
 108 static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card,
 109                                            unsigned int address,
 110                                            unsigned char *byte)
 111 {
 112         struct mmc_command io_cmd;
 113         int ret;
 114 
 115         memset(&io_cmd, 0, sizeof(io_cmd));
 116         ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0);
 117         io_cmd.opcode = SD_IO_RW_DIRECT;
 118         io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
 119 
 120         ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);
 121         if (!ret)
 122                 *byte = io_cmd.resp[0];
 123 
 124         return ret;
 125 }
 126 
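      /* One-time SDIO configuration done via function-0 CMD52 accesses:
       * drive strength, 4-bit asynchronous interrupt mode, async
       * interrupt delay and the block size used for mailbox transfers.
       */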
 127 static int ath10k_sdio_config(struct ath10k *ar)
 128 {
 129         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 130         struct sdio_func *func = ar_sdio->func;
 131         unsigned char byte, asyncintdelay = 2;
 132         int ret;
 133 
 134         ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");
 135 
 136         sdio_claim_host(func);
 137 
 138         byte = 0;
 139         ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
 140                                               SDIO_CCCR_DRIVE_STRENGTH,
 141                                               &byte);
 142 
 143         byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
 144         byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
 145                            ATH10K_SDIO_DRIVE_DTSX_TYPE_D);
 146 
 147         ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
 148                                               SDIO_CCCR_DRIVE_STRENGTH,
 149                                               byte);
 150 
 151         byte = 0;
 152         ret = ath10k_sdio_func0_cmd52_rd_byte(
 153                 func->card,
 154                 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
 155                 &byte);
 156 
 157         byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
 158                  CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
 159                  CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D);
 160 
 161         ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
 162                                               CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
 163                                               byte);
 164         if (ret) {
 165                 ath10k_warn(ar, "failed to enable driver strength: %d\n", ret);
 166                 goto out;
 167         }
 168 
 169         byte = 0;
 170         ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
 171                                               CCCR_SDIO_IRQ_MODE_REG_SDIO3,
 172                                               &byte);
 173 
 174         byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3;
 175 
 176         ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
 177                                               CCCR_SDIO_IRQ_MODE_REG_SDIO3,
 178                                               byte);
 179         if (ret) {
 180                 ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",
 181                             ret);
 182                 goto out;
 183         }
 184 
 185         byte = 0;
 186         ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
 187                                               CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
 188                                               &byte);
 189 
 190         byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK;
 191         byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay);
 192 
 193         ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
 194                                               CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
 195                                               byte);
 196 
 197         
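              /* maximum time, in ms, allowed for enabling the function */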
 198         func->enable_timeout = 100;
 199 
 200         ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);
 201         if (ret) {
 202                 ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",
 203                             ar_sdio->mbox_info.block_size, ret);
 204                 goto out;
 205         }
 206 
 207 out:
 208         sdio_release_host(func);
 209         return ret;
 210 }
 211 
 212 static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val)
 213 {
 214         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 215         struct sdio_func *func = ar_sdio->func;
 216         int ret;
 217 
 218         sdio_claim_host(func);
 219 
 220         sdio_writel(func, val, addr, &ret);
 221         if (ret) {
 222                 ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n",
 223                             val, addr, ret);
 224                 goto out;
 225         }
 226 
 227         ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n",
 228                    addr, val);
 229 
 230 out:
 231         sdio_release_host(func);
 232 
 233         return ret;
 234 }
 235 
 236 static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val)
 237 {
 238         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 239         struct sdio_func *func = ar_sdio->func;
 240         __le32 *buf;
 241         int ret;
 242 
 243         buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 244         if (!buf)
 245                 return -ENOMEM;
 246 
 247         *buf = cpu_to_le32(val);
 248 
 249         sdio_claim_host(func);
 250 
 251         ret = sdio_writesb(func, addr, buf, sizeof(*buf));
 252         if (ret) {
 253                 ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n",
 254                             val, addr, ret);
 255                 goto out;
 256         }
 257 
 258         ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n",
 259                    addr, val);
 260 
 261 out:
 262         sdio_release_host(func);
 263 
 264         kfree(buf);
 265 
 266         return ret;
 267 }
 268 
 269 static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val)
 270 {
 271         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 272         struct sdio_func *func = ar_sdio->func;
 273         int ret;
 274 
 275         sdio_claim_host(func);
 276         *val = sdio_readl(func, addr, &ret);
 277         if (ret) {
 278                 ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
 279                             addr, ret);
 280                 goto out;
 281         }
 282 
 283         ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n",
 284                    addr, *val);
 285 
 286 out:
 287         sdio_release_host(func);
 288 
 289         return ret;
 290 }
 291 
 292 static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len)
 293 {
 294         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 295         struct sdio_func *func = ar_sdio->func;
 296         int ret;
 297 
 298         sdio_claim_host(func);
 299 
 300         ret = sdio_memcpy_fromio(func, buf, addr, len);
 301         if (ret) {
 302                 ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
 303                             addr, ret);
 304                 goto out;
 305         }
 306 
 307         ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n",
 308                    addr, buf, len);
 309         ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len);
 310 
 311 out:
 312         sdio_release_host(func);
 313 
 314         return ret;
 315 }
 316 
 317 static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)
 318 {
 319         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 320         struct sdio_func *func = ar_sdio->func;
 321         int ret;
 322 
 323         sdio_claim_host(func);
 324 
 325         
 326 
 327 
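              /* sdio_memcpy_toio() takes a non-const buffer, hence the cast */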
 328         ret = sdio_memcpy_toio(func, addr, (void *)buf, len);
 329         if (ret) {
 330                 ath10k_warn(ar, "failed to write to address 0x%x: %d\n",
 331                             addr, ret);
 332                 goto out;
 333         }
 334 
 335         ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n",
 336                    addr, buf, len);
 337         ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len);
 338 
 339 out:
 340         sdio_release_host(func);
 341 
 342         return ret;
 343 }
 344 
 345 static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len)
 346 {
 347         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 348         struct sdio_func *func = ar_sdio->func;
 349         int ret;
 350 
 351         sdio_claim_host(func);
 352 
 353         len = round_down(len, ar_sdio->mbox_info.block_size);
 354 
 355         ret = sdio_readsb(func, buf, addr, len);
 356         if (ret) {
 357                 ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n",
 358                             addr, ret);
 359                 goto out;
 360         }
 361 
 362         ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n",
 363                    addr, buf, len);
 364         ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len);
 365 
 366 out:
 367         sdio_release_host(func);
 368 
 369         return ret;
 370 }
 371 
 372 
 373 
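      /* Mailbox RX path: HTC header parsing, rx buffer allocation and
       * delivery of completed packets to the HTC layer.
       */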
 374 static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
 375                                               struct ath10k_sdio_rx_data *pkt,
 376                                               u32 *lookaheads,
 377                                               int *n_lookaheads)
 378 {
 379         struct ath10k_htc *htc = &ar->htc;
 380         struct sk_buff *skb = pkt->skb;
 381         struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
 382         bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
 383         enum ath10k_htc_ep_id eid;
 384         u8 *trailer;
 385         int ret;
 386 
 387         if (trailer_present) {
 388                 trailer = skb->data + skb->len - htc_hdr->trailer_len;
 389 
 390                 eid = pipe_id_to_eid(htc_hdr->eid);
 391 
 392                 ret = ath10k_htc_process_trailer(htc,
 393                                                  trailer,
 394                                                  htc_hdr->trailer_len,
 395                                                  eid,
 396                                                  lookaheads,
 397                                                  n_lookaheads);
 398                 if (ret)
 399                         return ret;
 400 
 401                 if (is_trailer_only_msg(pkt))
 402                         pkt->trailer_only = true;
 403 
 404                 skb_trim(skb, skb->len - htc_hdr->trailer_len);
 405         }
 406 
 407         skb_pull(skb, sizeof(*htc_hdr));
 408 
 409         return 0;
 410 }
 411 
 412 static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
 413                                                u32 lookaheads[],
 414                                                int *n_lookahead)
 415 {
 416         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 417         struct ath10k_htc *htc = &ar->htc;
 418         struct ath10k_sdio_rx_data *pkt;
 419         struct ath10k_htc_ep *ep;
 420         enum ath10k_htc_ep_id id;
 421         int ret, i, *n_lookahead_local;
 422         u32 *lookaheads_local;
 423         int lookahead_idx = 0;
 424 
 425         for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
 426                 lookaheads_local = lookaheads;
 427                 n_lookahead_local = n_lookahead;
 428 
 429                 id = ((struct ath10k_htc_hdr *)
 430                       &lookaheads[lookahead_idx++])->eid;
 431 
 432                 if (id >= ATH10K_HTC_EP_COUNT) {
 433                         ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
 434                                     id);
 435                         ret = -ENOMEM;
 436                         goto out;
 437                 }
 438 
 439                 ep = &htc->endpoint[id];
 440 
 441                 if (ep->service_id == 0) {
 442                         ath10k_warn(ar, "ep %d is not connected\n", id);
 443                         ret = -ENOMEM;
 444                         goto out;
 445                 }
 446 
 447                 pkt = &ar_sdio->rx_pkts[i];
 448 
 449                 if (pkt->part_of_bundle && !pkt->last_in_bundle) {
 450                         
 451 
 452 
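                              /* Only the last packet in a bundle carries a
                               * valid lookahead; reuse the current entry and
                               * skip trailer lookaheads for the others.
                               */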
 453                         lookahead_idx--;
 454                         lookaheads_local = NULL;
 455                         n_lookahead_local = NULL;
 456                 }
 457 
 458                 ret = ath10k_sdio_mbox_rx_process_packet(ar,
 459                                                          pkt,
 460                                                          lookaheads_local,
 461                                                          n_lookahead_local);
 462                 if (ret)
 463                         goto out;
 464 
 465                 if (!pkt->trailer_only)
 466                         ep->ep_ops.ep_rx_complete(ar_sdio->ar, pkt->skb);
 467                 else
 468                         kfree_skb(pkt->skb);
 469 
 470                 
 471                 pkt->skb = NULL;
 472                 pkt->alloc_len = 0;
 473         }
 474 
 475         ret = 0;
 476 
 477 out:
 478         
 479 
 480 
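              /* Free any remaining packets that were not handed to the HTC layer */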
 481         for (; i < ar_sdio->n_rx_pkts; i++)
 482                 ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
 483 
 484         return ret;
 485 }
 486 
 487 static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
 488                                              struct ath10k_sdio_rx_data *rx_pkts,
 489                                              struct ath10k_htc_hdr *htc_hdr,
 490                                              size_t full_len, size_t act_len,
 491                                              size_t *bndl_cnt)
 492 {
 493         int ret, i;
 494 
 495         *bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
 496 
 497         if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) {
 498                 ath10k_warn(ar,
 499                             "HTC bundle length %u exceeds maximum %u\n",
 500                             le16_to_cpu(htc_hdr->len),
 501                             HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
 502                 return -ENOMEM;
 503         }
 504 
 505         
 506 
 507 
 508 
 509 
 510 
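              /* Allocate rx buffers for the bundled messages; the final
               * (last_in_bundle) packet is allocated by the caller.
               */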
 511         for (i = 0; i < *bndl_cnt; i++) {
 512                 ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i],
 513                                                     act_len,
 514                                                     full_len,
 515                                                     true,
 516                                                     false);
 517                 if (ret)
 518                         return ret;
 519         }
 520 
 521         return 0;
 522 }
 523 
 524 static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
 525                                      u32 lookaheads[], int n_lookaheads)
 526 {
 527         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 528         struct ath10k_htc_hdr *htc_hdr;
 529         size_t full_len, act_len;
 530         bool last_in_bundle;
 531         int ret, i;
 532 
 533         if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
 534                 ath10k_warn(ar,
 535                             "the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
 536                             n_lookaheads,
 537                             ATH10K_SDIO_MAX_RX_MSGS);
 538                 ret = -ENOMEM;
 539                 goto err;
 540         }
 541 
 542         for (i = 0; i < n_lookaheads; i++) {
 543                 htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
 544                 last_in_bundle = false;
 545 
 546                 if (le16_to_cpu(htc_hdr->len) >
 547                     ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
 548                         ath10k_warn(ar,
 549                                     "payload length %d exceeds max htc length: %zu\n",
 550                                     le16_to_cpu(htc_hdr->len),
 551                                     ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
 552                         ret = -ENOMEM;
 553                         goto err;
 554                 }
 555 
 556                 act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
 557                 full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);
 558 
 559                 if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
 560                         ath10k_warn(ar,
 561                                     "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
 562                                     htc_hdr->eid, htc_hdr->flags,
 563                                     le16_to_cpu(htc_hdr->len));
 564                         ret = -EINVAL;
 565                         goto err;
 566                 }
 567 
 568                 if (htc_hdr->flags & ATH10K_HTC_FLAG_BUNDLE_MASK) {
 569                         
 570 
 571 
 572 
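                              /* Every message in the bundle shares the padded
                               * length taken from this HTC header, so the
                               * whole bundle can be allocated up front.
                               */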
 573                         size_t bndl_cnt;
 574 
 575                         ret = ath10k_sdio_mbox_alloc_pkt_bundle(ar,
 576                                                                 &ar_sdio->rx_pkts[i],
 577                                                                 htc_hdr,
 578                                                                 full_len,
 579                                                                 act_len,
 580                                                                 &bndl_cnt);
 581 
 582                         if (ret) {
 583                                 ath10k_warn(ar, "alloc_bundle error %d\n", ret);
 584                                 goto err;
 585                         }
 586 
 587                         n_lookaheads += bndl_cnt;
 588                         i += bndl_cnt;
 589                         
 590                         last_in_bundle = true;
 591                 }
 592 
 593                 
 594 
 595 
 596 
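                      /* If the target indicates one more block follows this
                       * message, grow the rx buffer by a full mailbox block.
                       */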
 597                 if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
 598                         full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
 599 
 600                 ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
 601                                                     act_len,
 602                                                     full_len,
 603                                                     last_in_bundle,
 604                                                     last_in_bundle);
 605                 if (ret) {
 606                         ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
 607                         goto err;
 608                 }
 609         }
 610 
 611         ar_sdio->n_rx_pkts = i;
 612 
 613         return 0;
 614 
 615 err:
 616         for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) {
 617                 if (!ar_sdio->rx_pkts[i].alloc_len)
 618                         break;
 619                 ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
 620         }
 621 
 622         return ret;
 623 }
 624 
 625 static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar,
 626                                       struct ath10k_sdio_rx_data *pkt)
 627 {
 628         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 629         struct sk_buff *skb = pkt->skb;
 630         struct ath10k_htc_hdr *htc_hdr;
 631         int ret;
 632 
 633         ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
 634                                  skb->data, pkt->alloc_len);
 635         if (ret)
 636                 goto out;
 637 
 638         
 639 
 640 
 641 
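              /* Parse the HTC header of the packet that was just read and
               * derive the actual packet length from it.
               */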
 642         htc_hdr = (struct ath10k_htc_hdr *)skb->data;
 643         pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
 644         if (pkt->act_len > pkt->alloc_len) {
 645                 ath10k_warn(ar, "rx packet too large (%zu > %zu)\n",
 646                             pkt->act_len, pkt->alloc_len);
 647                 ret = -EMSGSIZE;
 648                 goto out;
 649         }
 650 
 651         skb_put(skb, pkt->act_len);
 652 
 653 out:
 654         pkt->status = ret;
 655 
 656         return ret;
 657 }
 658 
 659 static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
 660 {
 661         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 662         int ret, i;
 663 
 664         for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
 665                 ret = ath10k_sdio_mbox_rx_packet(ar,
 666                                                  &ar_sdio->rx_pkts[i]);
 667                 if (ret)
 668                         goto err;
 669         }
 670 
 671         return 0;
 672 
 673 err:
 674         
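              /* Free the packets that were allocated but not successfully fetched */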
 675         for (; i < ar_sdio->n_rx_pkts; i++)
 676                 ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
 677 
 678         return ret;
 679 }
 680 
 681 
 682 
 683 
 684 
 685 
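      /* Upper bound on how long the RX pending-message loop below may run
       * before returning to the caller.
       */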
 686 #define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ)
 687 
 688 static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
 689                                                   u32 msg_lookahead, bool *done)
 690 {
 691         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 692         u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS];
 693         int n_lookaheads = 1;
 694         unsigned long timeout;
 695         int ret;
 696 
 697         *done = true;
 698 
 699         
 700 
 701 
 702         lookaheads[0] = msg_lookahead;
 703 
 704         timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
 705         do {
 706                 
 707 
 708 
 709                 ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads,
 710                                                 n_lookaheads);
 711                 if (ret)
 712                         break;
 713 
 714                 if (ar_sdio->n_rx_pkts >= 2)
 715                         
 716 
 717 
 718                         *done = false;
 719 
 720                 ret = ath10k_sdio_mbox_rx_fetch(ar);
 721 
 722                 
 723 
 724 
 725 
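                      /* Process the fetched packets; lookaheads found in the
                       * RX trailers are written back into lookaheads[] and
                       * n_lookaheads for the next iteration.
                       */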
 726                 n_lookaheads = 0;
 727                 ret = ath10k_sdio_mbox_rx_process_packets(ar,
 728                                                           lookaheads,
 729                                                           &n_lookaheads);
 730 
 731                 if (!n_lookaheads || ret)
 732                         break;
 733 
 734                 
 735 
 736 
 737 
 738 
 739 
 740                 *done = false;
 741         } while (time_before(jiffies, timeout));
 742 
 743         if (ret && (ret != -ECANCELED))
 744                 ath10k_warn(ar, "failed to get pending recv messages: %d\n",
 745                             ret);
 746 
 747         return ret;
 748 }
 749 
 750 static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar)
 751 {
 752         u32 val;
 753         int ret;
 754 
 755         
 756         ath10k_warn(ar, "firmware crashed\n");
 757 
 758         
 759 
 760 
 761         ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val);
 762         if (ret)
 763                 ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret);
 764 
 765         return ret;
 766 }
 767 
 768 static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar)
 769 {
 770         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 771         struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
 772         u8 counter_int_status;
 773         int ret;
 774 
 775         mutex_lock(&irq_data->mtx);
 776         counter_int_status = irq_data->irq_proc_reg->counter_int_status &
 777                              irq_data->irq_en_reg->cntr_int_status_en;
 778 
 779         
 780 
 781 
 782 
 783         if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK)
 784                 ret = ath10k_sdio_mbox_proc_dbg_intr(ar);
 785         else
 786                 ret = 0;
 787 
 788         mutex_unlock(&irq_data->mtx);
 789 
 790         return ret;
 791 }
 792 
 793 static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar)
 794 {
 795         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 796         struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
 797         u8 error_int_status;
 798         int ret;
 799 
 800         ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");
 801 
 802         error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
 803         if (!error_int_status) {
 804                 ath10k_warn(ar, "invalid error interrupt status: 0x%x\n",
 805                             error_int_status);
 806                 return -EIO;
 807         }
 808 
 809         ath10k_dbg(ar, ATH10K_DBG_SDIO,
 810                    "sdio error_int_status 0x%x\n", error_int_status);
 811 
 812         if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK,
 813                       error_int_status))
 814                 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");
 815 
 816         if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
 817                       error_int_status))
 818                 ath10k_warn(ar, "rx underflow interrupt error\n");
 819 
 820         if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
 821                       error_int_status))
 822                 ath10k_warn(ar, "tx overflow interrupt error\n");
 823 
 824         
 825         irq_data->irq_proc_reg->error_int_status &= ~error_int_status;
 826 
 827         
 828         ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS,
 829                                     error_int_status);
 830         if (ret) {
 831                 ath10k_warn(ar, "unable to write to error int status address: %d\n",
 832                             ret);
 833                 return ret;
 834         }
 835 
 836         return 0;
 837 }
 838 
 839 static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
 840 {
 841         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 842         struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
 843         u8 cpu_int_status;
 844         int ret;
 845 
 846         mutex_lock(&irq_data->mtx);
 847         cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
 848                          irq_data->irq_en_reg->cpu_int_status_en;
 849         if (!cpu_int_status) {
 850                 ath10k_warn(ar, "CPU interrupt status is zero\n");
 851                 ret = -EIO;
 852                 goto out;
 853         }
 854 
 855         
 856         irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;
 857 
 858         
 859 
 860 
 861 
 862 
 863 
 864 
 865         ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS,
 866                                     cpu_int_status);
 867         if (ret) {
 868                 ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n",
 869                             ret);
 870                 goto out;
 871         }
 872 
 873 out:
 874         mutex_unlock(&irq_data->mtx);
 875         if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK) {
 876                 ath10k_err(ar, "firmware crashed!\n");
 877                 queue_work(ar->workqueue, &ar->restart_work);
 878         }
 879         return ret;
 880 }
 881 
 882 static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
 883                                             u8 *host_int_status,
 884                                             u32 *lookahead)
 885 {
 886         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 887         struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
 888         struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
 889         struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
 890         u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1);
 891         int ret;
 892 
 893         mutex_lock(&irq_data->mtx);
 894 
 895         *lookahead = 0;
 896         *host_int_status = 0;
 897 
 898         
 899 
 900 
 901 
 902 
 903 
 904         if (!irq_en_reg->int_status_en) {
 905                 ret = 0;
 906                 goto out;
 907         }
 908 
 909         
 910 
 911 
 912 
 913 
 914         ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
 915                                irq_proc_reg, sizeof(*irq_proc_reg));
 916         if (ret)
 917                 goto out;
 918 
 919         
 920         *host_int_status = irq_proc_reg->host_int_status &
 921                            irq_en_reg->int_status_en;
 922 
 923         
 924         if (!(*host_int_status & htc_mbox)) {
 925                 *lookahead = 0;
 926                 ret = 0;
 927                 goto out;
 928         }
 929 
 930         
 931 
 932 
 933         *host_int_status &= ~htc_mbox;
 934         if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
 935                 *lookahead = le32_to_cpu(
 936                         irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
 937                 if (!*lookahead)
 938                         ath10k_warn(ar, "sdio mbox lookahead is zero\n");
 939         }
 940 
 941 out:
 942         mutex_unlock(&irq_data->mtx);
 943         return ret;
 944 }
 945 
 946 static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
 947                                               bool *done)
 948 {
 949         u8 host_int_status;
 950         u32 lookahead;
 951         int ret;
 952 
 953         
 954 
 955 
 956 
 957 
 958 
 959         ret = ath10k_sdio_mbox_read_int_status(ar,
 960                                                &host_int_status,
 961                                                &lookahead);
 962         if (ret) {
 963                 *done = true;
 964                 goto out;
 965         }
 966 
 967         if (!host_int_status && !lookahead) {
 968                 ret = 0;
 969                 *done = true;
 970                 goto out;
 971         }
 972 
 973         if (lookahead) {
 974                 ath10k_dbg(ar, ATH10K_DBG_SDIO,
 975                            "sdio pending mailbox msg lookahead 0x%08x\n",
 976                            lookahead);
 977 
 978                 ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar,
 979                                                              lookahead,
 980                                                              done);
 981                 if (ret)
 982                         goto out;
 983         }
 984 
 985         
 986         ath10k_dbg(ar, ATH10K_DBG_SDIO,
 987                    "sdio host_int_status 0x%x\n", host_int_status);
 988 
 989         if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) {
 990                 
 991                 ret = ath10k_sdio_mbox_proc_cpu_intr(ar);
 992                 if (ret)
 993                         goto out;
 994         }
 995 
 996         if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) {
 997                 
 998                 ret = ath10k_sdio_mbox_proc_err_intr(ar);
 999                 if (ret)
1000                         goto out;
1001         }
1002 
1003         if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status))
1004                 
1005                 ret = ath10k_sdio_mbox_proc_counter_intr(ar);
1006 
1007         ret = 0;
1008 
1009 out:
1010         
1011 
1012 
1013 
1014 
1015 
1016 
1017 
1018 
1019 
1020 
1021 
1022         ath10k_dbg(ar, ATH10K_DBG_SDIO,
1023                    "sdio pending irqs done %d status %d",
1024                    *done, ret);
1025 
1026         return ret;
1027 }
1028 
1029 static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
1030 {
1031         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1032         struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
1033         u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;
1034 
1035         mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
1036         mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
1037         mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
1038         mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
1039         mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;
1040 
1041         mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;
1042 
1043         dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, device);
1044         dev_id_chiprev = FIELD_GET(QCA_MANUFACTURER_ID_REV_MASK, device);
1045         switch (dev_id_base) {
1046         case QCA_MANUFACTURER_ID_AR6005_BASE:
1047                 if (dev_id_chiprev < 4)
1048                         mbox_info->ext_info[0].htc_ext_sz =
1049                                 ATH10K_HIF_MBOX0_EXT_WIDTH;
1050                 else
1051                         
1052 
1053 
1054                         mbox_info->ext_info[0].htc_ext_sz =
1055                                 ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
1056                 break;
1057         case QCA_MANUFACTURER_ID_QCA9377_BASE:
1058                 mbox_info->ext_info[0].htc_ext_sz =
1059                         ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
1060                 break;
1061         default:
1062                 mbox_info->ext_info[0].htc_ext_sz =
1063                                 ATH10K_HIF_MBOX0_EXT_WIDTH;
1064         }
1065 
1066         mbox_info->ext_info[1].htc_ext_addr =
1067                 mbox_info->ext_info[0].htc_ext_addr +
1068                 mbox_info->ext_info[0].htc_ext_sz +
1069                 ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE;
1070         mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
1071 }
1072 
1073 
1074 
1075 static int ath10k_sdio_bmi_credits(struct ath10k *ar)
1076 {
1077         u32 addr, cmd_credits;
1078         unsigned long timeout;
1079         int ret;
1080 
1081         
1082         addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4;
1083         timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1084         cmd_credits = 0;
1085 
1086         while (time_before(jiffies, timeout) && !cmd_credits) {
1087                 
1088 
1089 
1090 
1091 
1092                 ret = ath10k_sdio_read32(ar, addr, &cmd_credits);
1093                 if (ret) {
1094                         ath10k_warn(ar,
1095                                     "unable to decrement the command credit count register: %d\n",
1096                                     ret);
1097                         return ret;
1098                 }
1099 
1100                 
1101 
1102 
1103                 cmd_credits &= 0xFF;
1104         }
1105 
1106         if (!cmd_credits) {
1107                 ath10k_warn(ar, "bmi communication timeout\n");
1108                 return -ETIMEDOUT;
1109         }
1110 
1111         return 0;
1112 }
1113 
1114 static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
1115 {
1116         unsigned long timeout;
1117         u32 rx_word;
1118         int ret;
1119 
1120         timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1121         rx_word = 0;
1122 
1123         while ((time_before(jiffies, timeout)) && !rx_word) {
1124                 ret = ath10k_sdio_read32(ar,
1125                                          MBOX_HOST_INT_STATUS_ADDRESS,
1126                                          &rx_word);
1127                 if (ret) {
1128                         ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
1129                         return ret;
1130                 }
1131 
1132                  
1133                 rx_word &= 1;
1134         }
1135 
1136         if (!rx_word) {
1137                 ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
1138                 return -EINVAL;
1139         }
1140 
1141         return ret;
1142 }
1143 
1144 static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
1145                                         void *req, u32 req_len,
1146                                         void *resp, u32 *resp_len)
1147 {
1148         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1149         u32 addr;
1150         int ret;
1151 
1152         if (req) {
1153                 ret = ath10k_sdio_bmi_credits(ar);
1154                 if (ret)
1155                         return ret;
1156 
1157                 addr = ar_sdio->mbox_info.htc_addr;
1158 
1159                 memcpy(ar_sdio->bmi_buf, req, req_len);
1160                 ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
1161                 if (ret) {
1162                         ath10k_warn(ar,
1163                                     "unable to send the bmi data to the device: %d\n",
1164                                     ret);
1165                         return ret;
1166                 }
1167         }
1168 
1169         if (!resp || !resp_len)
1170                 
1171                 return 0;
1172 
1173         
1174 
1175 
1176 
1177 
1178 
1179 
1180 
1181 
1182 
1183 
1184 
1185 
1186 
1187 
1188 
1189 
1190 
1191 
1192 
1193 
1194 
1195 
1196 
1197 
1198 
1199 
1200 
1201 
1202 
1203 
1204 
1205 
1206 
1207 
1208 
1209 
1210 
1211 
1212 
1213 
1214 
1215 
1216 
1217 
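              /* Wait until the target signals that a BMI response is
               * pending in the mailbox before reading it out.
               */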
1218         ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
1219         if (ret)
1220                 return ret;
1221 
1222         
1223         addr = ar_sdio->mbox_info.htc_addr;
1224         ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
1225         if (ret) {
1226                 ath10k_warn(ar,
1227                             "unable to read the bmi data from the device: %d\n",
1228                             ret);
1229                 return ret;
1230         }
1231 
1232         memcpy(resp, ar_sdio->bmi_buf, *resp_len);
1233 
1234         return 0;
1235 }
1236 
1237 
1238 
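      /* Bus request handling for asynchronous SDIO writes */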
1239 static struct ath10k_sdio_bus_request
1240 *ath10k_sdio_alloc_busreq(struct ath10k *ar)
1241 {
1242         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1243         struct ath10k_sdio_bus_request *bus_req;
1244 
1245         spin_lock_bh(&ar_sdio->lock);
1246 
1247         if (list_empty(&ar_sdio->bus_req_freeq)) {
1248                 bus_req = NULL;
1249                 goto out;
1250         }
1251 
1252         bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
1253                                    struct ath10k_sdio_bus_request, list);
1254         list_del(&bus_req->list);
1255 
1256 out:
1257         spin_unlock_bh(&ar_sdio->lock);
1258         return bus_req;
1259 }
1260 
1261 static void ath10k_sdio_free_bus_req(struct ath10k *ar,
1262                                      struct ath10k_sdio_bus_request *bus_req)
1263 {
1264         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1265 
1266         memset(bus_req, 0, sizeof(*bus_req));
1267 
1268         spin_lock_bh(&ar_sdio->lock);
1269         list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
1270         spin_unlock_bh(&ar_sdio->lock);
1271 }
1272 
1273 static void __ath10k_sdio_write_async(struct ath10k *ar,
1274                                       struct ath10k_sdio_bus_request *req)
1275 {
1276         struct ath10k_htc_ep *ep;
1277         struct sk_buff *skb;
1278         int ret;
1279 
1280         skb = req->skb;
1281         ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
1282         if (ret)
1283                 ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d",
1284                             req->address, ret);
1285 
1286         if (req->htc_msg) {
1287                 ep = &ar->htc.endpoint[req->eid];
1288                 ath10k_htc_notify_tx_completion(ep, skb);
1289         } else if (req->comp) {
1290                 complete(req->comp);
1291         }
1292 
1293         ath10k_sdio_free_bus_req(ar, req);
1294 }
1295 
1296 static void ath10k_sdio_write_async_work(struct work_struct *work)
1297 {
1298         struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
1299                                                    wr_async_work);
1300         struct ath10k *ar = ar_sdio->ar;
1301         struct ath10k_sdio_bus_request *req, *tmp_req;
1302 
1303         spin_lock_bh(&ar_sdio->wr_async_lock);
1304 
1305         list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
1306                 list_del(&req->list);
1307                 spin_unlock_bh(&ar_sdio->wr_async_lock);
1308                 __ath10k_sdio_write_async(ar, req);
1309                 spin_lock_bh(&ar_sdio->wr_async_lock);
1310         }
1311 
1312         spin_unlock_bh(&ar_sdio->wr_async_lock);
1313 }
1314 
1315 static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
1316                                       struct sk_buff *skb,
1317                                       struct completion *comp,
1318                                       bool htc_msg, enum ath10k_htc_ep_id eid)
1319 {
1320         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1321         struct ath10k_sdio_bus_request *bus_req;
1322 
1323         
1324 
1325 
1326         bus_req = ath10k_sdio_alloc_busreq(ar);
1327         if (!bus_req) {
1328                 ath10k_warn(ar,
1329                             "unable to allocate bus request for async request\n");
1330                 return -ENOMEM;
1331         }
1332 
1333         bus_req->skb = skb;
1334         bus_req->eid = eid;
1335         bus_req->address = addr;
1336         bus_req->htc_msg = htc_msg;
1337         bus_req->comp = comp;
1338 
1339         spin_lock_bh(&ar_sdio->wr_async_lock);
1340         list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
1341         spin_unlock_bh(&ar_sdio->wr_async_lock);
1342 
1343         return 0;
1344 }
1345 
1346 
1347 
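      /* SDIO interrupt handler */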
1348 static void ath10k_sdio_irq_handler(struct sdio_func *func)
1349 {
1350         struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
1351         struct ath10k *ar = ar_sdio->ar;
1352         unsigned long timeout;
1353         bool done = false;
1354         int ret;
1355 
1356         
1357 
1358 
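              /* Release the host so that the RX/TX paths can claim it
               * themselves; it is re-claimed before returning to the SDIO
               * core, which invoked this handler with the host claimed.
               */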
1359         sdio_release_host(ar_sdio->func);
1360 
1361         timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
1362         do {
1363                 ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
1364                 if (ret)
1365                         break;
1366         } while (time_before(jiffies, timeout) && !done);
1367 
1368         ath10k_mac_tx_push_pending(ar);
1369 
1370         sdio_claim_host(ar_sdio->func);
1371 
1372         if (ret && ret != -ECANCELED)
1373                 ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
1374                             ret);
1375 }
1376 
1377 
1378 
1379 static int ath10k_sdio_hif_disable_intrs(struct ath10k *ar)
1380 {
1381         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1382         struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
1383         struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
1384         int ret;
1385 
1386         mutex_lock(&irq_data->mtx);
1387 
1388         memset(regs, 0, sizeof(*regs));
1389         ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
1390                                 ®s->int_status_en, sizeof(*regs));
1391         if (ret)
1392                 ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);
1393 
1394         mutex_unlock(&irq_data->mtx);
1395 
1396         return ret;
1397 }
1398 
1399 static int ath10k_sdio_hif_power_up(struct ath10k *ar,
1400                                     enum ath10k_firmware_mode fw_mode)
1401 {
1402         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1403         struct sdio_func *func = ar_sdio->func;
1404         int ret;
1405 
1406         if (!ar_sdio->is_disabled)
1407                 return 0;
1408 
1409         ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");
1410 
1411         ret = ath10k_sdio_config(ar);
1412         if (ret) {
1413                 ath10k_err(ar, "failed to config sdio: %d\n", ret);
1414                 return ret;
1415         }
1416 
1417         sdio_claim_host(func);
1418 
1419         ret = sdio_enable_func(func);
1420         if (ret) {
 1421                 ath10k_warn(ar, "unable to enable sdio function: %d\n", ret);
1422                 sdio_release_host(func);
1423                 return ret;
1424         }
1425 
1426         sdio_release_host(func);
1427 
1428         
1429 
1430 
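              /* brief delay to let the card settle after the function has
               * been enabled
               */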
1431         msleep(20);
1432 
1433         ar_sdio->is_disabled = false;
1434 
1435         ret = ath10k_sdio_hif_disable_intrs(ar);
1436         if (ret)
1437                 return ret;
1438 
1439         return 0;
1440 }
1441 
1442 static void ath10k_sdio_hif_power_down(struct ath10k *ar)
1443 {
1444         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1445         int ret;
1446 
1447         if (ar_sdio->is_disabled)
1448                 return;
1449 
1450         ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");
1451 
1452         
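              /* Disable the function and reset the card while holding the host */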
1453         sdio_claim_host(ar_sdio->func);
1454 
1455         ret = sdio_disable_func(ar_sdio->func);
1456         if (ret) {
1457                 ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
1458                 sdio_release_host(ar_sdio->func);
1459                 return;
1460         }
1461 
1462         ret = mmc_hw_reset(ar_sdio->func->card->host);
1463         if (ret)
1464                 ath10k_warn(ar, "unable to reset sdio: %d\n", ret);
1465 
1466         sdio_release_host(ar_sdio->func);
1467 
1468         ar_sdio->is_disabled = true;
1469 }
1470 
1471 static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1472                                  struct ath10k_hif_sg_item *items, int n_items)
1473 {
1474         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1475         enum ath10k_htc_ep_id eid;
1476         struct sk_buff *skb;
1477         int ret, i;
1478 
1479         eid = pipe_id_to_eid(pipe_id);
1480 
1481         for (i = 0; i < n_items; i++) {
1482                 size_t padded_len;
1483                 u32 address;
1484 
1485                 skb = items[i].transfer_context;
1486                 padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,
1487                                                               skb->len);
1488                 skb_trim(skb, padded_len);
1489 
1490                 
1491                 address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -
1492                           skb->len;
1493                 ret = ath10k_sdio_prep_async_req(ar, address, skb,
1494                                                  NULL, true, eid);
1495                 if (ret)
1496                         return ret;
1497         }
1498 
1499         queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
1500 
1501         return 0;
1502 }
1503 
1504 static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar)
1505 {
1506         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1507         struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
1508         struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
1509         int ret;
1510 
1511         mutex_lock(&irq_data->mtx);
1512 
1513         
1514         regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
1515                               FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |
1516                               FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);
1517 
1518         
1519 
1520 
1521         regs->int_status_en |=
1522                 FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);
1523 
1524         
1525 
1526 
1527         regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1);
1528 
1529         
1530         regs->err_int_status_en =
1531                 FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |
1532                 FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);
1533 
1534         
1535 
1536 
1537         regs->cntr_int_status_en =
1538                 FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
1539                            ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);
1540 
1541         ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
 1542                                 &regs->int_status_en, sizeof(*regs));
1543         if (ret)
1544                 ath10k_warn(ar,
1545                             "failed to update mbox interrupt status register : %d\n",
1546                             ret);
1547 
1548         mutex_unlock(&irq_data->mtx);
1549         return ret;
1550 }
1551 
1552 static int ath10k_sdio_hif_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
1553 {
1554         u32 val;
1555         int ret;
1556 
1557         ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
1558         if (ret) {
1559                 ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
1560                             ret);
1561                 return ret;
1562         }
1563 
1564         if (enable_sleep)
1565                 val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
1566         else
1567                 val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
1568 
1569         ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
1570         if (ret) {
1571                 ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
1572                             ret);
1573                 return ret;
1574         }
1575 
1576         return 0;
1577 }
1578 
1579 
1580 
1581 static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1582                                      size_t buf_len)
1583 {
1584         int ret;
1585 
1586         
1587         ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
1588         if (ret) {
1589                 ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
1590                 return ret;
1591         }
1592 
1593         
1594         ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len);
1595         if (ret) {
1596                 ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
1597                             ret);
1598                 return ret;
1599         }
1600 
1601         return 0;
1602 }
1603 
1604 static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
1605                                        u32 *value)
1606 {
1607         __le32 *val;
1608         int ret;
1609 
1610         val = kzalloc(sizeof(*val), GFP_KERNEL);
1611         if (!val)
1612                 return -ENOMEM;
1613 
1614         ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val));
1615         if (ret)
1616                 goto out;
1617 
1618         *value = __le32_to_cpu(*val);
1619 
1620 out:
1621         kfree(val);
1622 
1623         return ret;
1624 }
1625 
1626 static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
1627                                           const void *data, int nbytes)
1628 {
1629         int ret;
1630 
1631         
1632         ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
1633         if (ret) {
1634                 ath10k_warn(ar,
1635                             "failed to write 0x%p to mbox window data address: %d\n",
1636                             data, ret);
1637                 return ret;
1638         }
1639 
1640         /* set window register, which starts the write cycle */
1641         ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);
1642         if (ret) {
1643                 ath10k_warn(ar, "failed to set mbox window write address: %d", ret);
1644                 return ret;
1645         }
1646 
1647         return 0;
1648 }
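/* Note the ordering above: the payload is staged at MBOX_WINDOW_DATA_ADDRESS
 * first, and writing the destination address to MBOX_WINDOW_WRITE_ADDR_ADDRESS
 * is what triggers the actual write cycle. A hedged sketch for writing a
 * single 32-bit word (hypothetical caller code, not part of this file):
 *
 *	__le32 val = __cpu_to_le32(0x1);
 *
 *	ret = ath10k_sdio_hif_diag_write_mem(ar, address, &val, sizeof(val));
 */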
1649 
1650 static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar)
1651 {
1652         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1653         u32 addr, val;
1654         int ret = 0;
1655 
1656         addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
1657 
1658         ret = ath10k_sdio_hif_diag_read32(ar, addr, &val);
1659         if (ret) {
1660                 ath10k_warn(ar, "unable to read hi_acs_flags : %d\n", ret);
1661                 return ret;
1662         }
1663 
1664         if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
1665                 ath10k_dbg(ar, ATH10K_DBG_SDIO,
1666                            "sdio mailbox swap service enabled\n");
1667                 ar_sdio->swap_mbox = true;
1668         } else {
1669                 ath10k_dbg(ar, ATH10K_DBG_SDIO,
1670                            "sdio mailbox swap service disabled\n");
1671                 ar_sdio->swap_mbox = false;
1672         }
1673 
1674         return 0;
1675 }
1676 
1677 /* HIF start/stop */
1678 
1679 static int ath10k_sdio_hif_start(struct ath10k *ar)
1680 {
1681         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1682         int ret;
1683 
1684         /* Sleep 20 ms before HIF interrupts are disabled.
1685          * This will give the target plenty of time to process the BMI done
1686          * request before interrupts are disabled.
1687          */
1688         msleep(20);
1689         ret = ath10k_sdio_hif_disable_intrs(ar);
1690         if (ret)
1691                 return ret;
1692 
1693         /* Eid 0 needs to be assigned to an external mailbox to be able
1694          * to complete the BMI phase.
1695          */
1696         ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
1697         ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
1698 
1699         sdio_claim_host(ar_sdio->func);
1700 
1701         /* Register the isr */
1702         ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
1703         if (ret) {
1704                 ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
1705                 sdio_release_host(ar_sdio->func);
1706                 return ret;
1707         }
1708 
1709         sdio_release_host(ar_sdio->func);
1710 
1711         ret = ath10k_sdio_hif_enable_intrs(ar);
1712         if (ret)
1713                 ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
1714 
1715         /* Enable sleep and then disable it again */
1716         ret = ath10k_sdio_hif_set_mbox_sleep(ar, true);
1717         if (ret)
1718                 return ret;
1719 
1720         /* Wait for 20ms for the written value to take effect */
1721         msleep(20);
1722 
1723         ret = ath10k_sdio_hif_set_mbox_sleep(ar, false);
1724         if (ret)
1725                 return ret;
1726 
1727         return 0;
1728 }
1729 
1730 #define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)
1731 
1732 static void ath10k_sdio_irq_disable(struct ath10k *ar)
1733 {
1734         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1735         struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
1736         struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
1737         struct sk_buff *skb;
1738         struct completion irqs_disabled_comp;
1739         int ret;
1740 
1741         skb = dev_alloc_skb(sizeof(*regs));
1742         if (!skb)
1743                 return;
1744 
1745         mutex_lock(&irq_data->mtx);
1746 
1747         memset(regs, 0, sizeof(*regs)); /* disable all interrupts */
1748         memcpy(skb->data, regs, sizeof(*regs));
1749         skb_put(skb, sizeof(*regs));
1750 
1751         mutex_unlock(&irq_data->mtx);
1752 
1753         init_completion(&irqs_disabled_comp);
1754         ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
1755                                          skb, &irqs_disabled_comp, false, 0);
1756         if (ret)
1757                 goto out;
1758 
1759         queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
1760 
1761         /* Wait for the completion of the IRQ disable request.
1762          * If there is a timeout we will try to disable irqs anyway.
1763          */
1764         ret = wait_for_completion_timeout(&irqs_disabled_comp,
1765                                           SDIO_IRQ_DISABLE_TIMEOUT_HZ);
1766         if (!ret)
1767                 ath10k_warn(ar, "sdio irq disable request timed out\n");
1768 
1769         sdio_claim_host(ar_sdio->func);
1770 
1771         ret = sdio_release_irq(ar_sdio->func);
1772         if (ret)
1773                 ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);
1774 
1775         sdio_release_host(ar_sdio->func);
1776 
1777 out:
1778         kfree_skb(skb);
1779 }
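/* The zeroed copy of the interrupt enable registers is routed through the
 * async write queue (ath10k_sdio_prep_async_req() plus wr_async_work) rather
 * than written directly, which keeps it ordered with respect to any register
 * writes still pending on that queue; the completion above bounds the wait to
 * SDIO_IRQ_DISABLE_TIMEOUT_HZ before the SDIO irq is finally released.
 */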
1780 
1781 static void ath10k_sdio_hif_stop(struct ath10k *ar)
1782 {
1783         struct ath10k_sdio_bus_request *req, *tmp_req;
1784         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1785 
1786         ath10k_sdio_irq_disable(ar);
1787 
1788         cancel_work_sync(&ar_sdio->wr_async_work);
1789 
1790         spin_lock_bh(&ar_sdio->wr_async_lock);
1791 
1792         /* Free all bus requests that have not been handled */
1793         list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
1794                 struct ath10k_htc_ep *ep;
1795 
1796                 list_del(&req->list);
1797 
1798                 if (req->htc_msg) {
1799                         ep = &ar->htc.endpoint[req->eid];
1800                         ath10k_htc_notify_tx_completion(ep, req->skb);
1801                 } else if (req->skb) {
1802                         kfree_skb(req->skb);
1803                 }
1804                 ath10k_sdio_free_bus_req(ar, req);
1805         }
1806 
1807         spin_unlock_bh(&ar_sdio->wr_async_lock);
1808 }
1809 
1810 #ifdef CONFIG_PM
1811 
1812 static int ath10k_sdio_hif_suspend(struct ath10k *ar)
1813 {
1814         return -EOPNOTSUPP;
1815 }
1816 
1817 static int ath10k_sdio_hif_resume(struct ath10k *ar)
1818 {
1819         switch (ar->state) {
1820         case ATH10K_STATE_OFF:
1821                 ath10k_dbg(ar, ATH10K_DBG_SDIO,
1822                            "sdio resume configuring sdio\n");
1823 
1824                 /* need to set sdio settings after power is cut from sdio */
1825                 ath10k_sdio_config(ar);
1826                 break;
1827 
1828         case ATH10K_STATE_ON:
1829         default:
1830                 break;
1831         }
1832 
1833         return 0;
1834 }
1835 #endif
1836 
1837 static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar,
1838                                                u16 service_id,
1839                                                u8 *ul_pipe, u8 *dl_pipe)
1840 {
1841         struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1842         struct ath10k_htc *htc = &ar->htc;
1843         u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size;
1844         enum ath10k_htc_ep_id eid;
1845         bool ep_found = false;
1846         int i;
1847 
1848         /* For sdio, we are interested in the mapping between eid
1849          * and pipeid rather than service_id to pipe_id.
1850          * First we find out which eid has been allocated to the
1851          * service.
1852          */
1853         for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
1854                 if (htc->endpoint[i].service_id == service_id) {
1855                         eid = htc->endpoint[i].eid;
1856                         ep_found = true;
1857                         break;
1858                 }
1859         }
1860 
1861         if (!ep_found)
1862                 return -EINVAL;
1863 
1864         /* Then we create the simplest mapping possible between pipeid
1865          * and eid
1866          */
1867         *ul_pipe = *dl_pipe = (u8)eid;
1868 
1869         /* Normally HTT is mapped to extended mailbox 1 and WMI to
1870          * extended mailbox 0. If the firmware has acked the mailbox
1871          * swap service (swap_mbox), the mapping is reversed so that
1872          * HTT uses extended mailbox 0 and WMI extended mailbox 1.
1873          */
1874         if (ar_sdio->swap_mbox) {
1875                 htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
1876                 wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
1877                 htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
1878                 wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
1879         } else {
1880                 htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
1881                 wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
1882                 htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
1883                 wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
1884         }
1885 
1886         switch (service_id) {
1887         case ATH10K_HTC_SVC_ID_RSVD_CTRL:
1888                 /* HTC ctrl ep mbox address has already been setup in
1889                  * ath10k_sdio_hif_start
1890                  */
1891                 break;
1892         case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1893                 ar_sdio->mbox_addr[eid] = wmi_addr;
1894                 ar_sdio->mbox_size[eid] = wmi_mbox_size;
1895                 ath10k_dbg(ar, ATH10K_DBG_SDIO,
1896                            "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",
1897                            ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
1898                 break;
1899         case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
1900                 ar_sdio->mbox_addr[eid] = htt_addr;
1901                 ar_sdio->mbox_size[eid] = htt_mbox_size;
1902                 ath10k_dbg(ar, ATH10K_DBG_SDIO,
1903                            "sdio htt data mbox_addr 0x%x mbox_size %d\n",
1904                            ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
1905                 break;
1906         default:
1907                 ath10k_warn(ar, "unsupported HTC service id: %d\n",
1908                             service_id);
1909                 return -EINVAL;
1910         }
1911 
1912         return 0;
1913 }
1914 
1915 static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
1916                                              u8 *ul_pipe, u8 *dl_pipe)
1917 {
1918         ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");
1919 
1920         /* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our
1921          * case) == 0
1922          */
1923         *ul_pipe = 0;
1924         *dl_pipe = 0;
1925 }
1926 
1927 /* This op is currently only used by htc_wait_target if the HTC ready
1928  * message times out. It is not applicable for SDIO since there is nothing
1929  * we can do if the HTC ready message does not arrive in time.
1930  * TODO: Make this op non-mandatory by introducing a NULL check in the
1931  * hif op wrapper.
1932  */
1933 static void ath10k_sdio_hif_send_complete_check(struct ath10k *ar,
1934                                                 u8 pipe, int force)
1935 {
1936 }
1937 
1938 static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
1939         .tx_sg                  = ath10k_sdio_hif_tx_sg,
1940         .diag_read              = ath10k_sdio_hif_diag_read,
1941         .diag_write             = ath10k_sdio_hif_diag_write_mem,
1942         .exchange_bmi_msg       = ath10k_sdio_bmi_exchange_msg,
1943         .start                  = ath10k_sdio_hif_start,
1944         .stop                   = ath10k_sdio_hif_stop,
1945         .swap_mailbox           = ath10k_sdio_hif_swap_mailbox,
1946         .map_service_to_pipe    = ath10k_sdio_hif_map_service_to_pipe,
1947         .get_default_pipe       = ath10k_sdio_hif_get_default_pipe,
1948         .send_complete_check    = ath10k_sdio_hif_send_complete_check,
1949         .power_up               = ath10k_sdio_hif_power_up,
1950         .power_down             = ath10k_sdio_hif_power_down,
1951 #ifdef CONFIG_PM
1952         .suspend                = ath10k_sdio_hif_suspend,
1953         .resume                 = ath10k_sdio_hif_resume,
1954 #endif
1955 };
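/* The ath10k core does not call these ops directly; it goes through the
 * ath10k_hif_* wrappers in hif.h. As a hedged example, ath10k_hif_start(ar)
 * dispatches to .start, i.e. ath10k_sdio_hif_start() above.
 */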
1956 
1957 #ifdef CONFIG_PM_SLEEP
1958 
1959 /* Empty handlers so that the mmc subsystem doesn't remove us entirely
1960  * during suspend. We instead follow cfg80211 suspend/resume handlers.
1961  */
1962 static int ath10k_sdio_pm_suspend(struct device *device)
1963 {
1964         return 0;
1965 }
1966 
1967 static int ath10k_sdio_pm_resume(struct device *device)
1968 {
1969         return 0;
1970 }
1971 
1972 static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
1973                          ath10k_sdio_pm_resume);
1974 
1975 #define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)
1976 
1977 #else
1978 
1979 #define ATH10K_SDIO_PM_OPS NULL
1980 
1981 #endif /* CONFIG_PM_SLEEP */
1982 
1983 static int ath10k_sdio_probe(struct sdio_func *func,
1984                              const struct sdio_device_id *id)
1985 {
1986         struct ath10k_sdio *ar_sdio;
1987         struct ath10k *ar;
1988         enum ath10k_hw_rev hw_rev;
1989         u32 dev_id_base;
1990         struct ath10k_bus_params bus_params = {};
1991         int ret, i;
1992 
1993         /* Assumption: all SDIO based chipsets (so far) are QCA6174 based.
1994          * If there will be newer chipsets that do not use the hw reg
1995          * setup as defined in qca6174_regs and qca6174_values, this
1996          * assumption is no longer valid and hw_rev must be set up
1997          * differently depending on the chipset.
1998          */
1999         hw_rev = ATH10K_HW_QCA6174;
2000 
2001         ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,
2002                                 hw_rev, &ath10k_sdio_hif_ops);
2003         if (!ar) {
2004                 dev_err(&func->dev, "failed to allocate core\n");
2005                 return -ENOMEM;
2006         }
2007 
2008         ath10k_dbg(ar, ATH10K_DBG_BOOT,
2009                    "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
2010                    func->num, func->vendor, func->device,
2011                    func->max_blksize, func->cur_blksize);
2012 
2013         ar_sdio = ath10k_sdio_priv(ar);
2014 
2015         ar_sdio->irq_data.irq_proc_reg =
2016                 devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),
2017                              GFP_KERNEL);
2018         if (!ar_sdio->irq_data.irq_proc_reg) {
2019                 ret = -ENOMEM;
2020                 goto err_core_destroy;
2021         }
2022 
2023         ar_sdio->irq_data.irq_en_reg =
2024                 devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
2025                              GFP_KERNEL);
2026         if (!ar_sdio->irq_data.irq_en_reg) {
2027                 ret = -ENOMEM;
2028                 goto err_core_destroy;
2029         }
2030 
2031         ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_CMDBUF_SIZE, GFP_KERNEL);
2032         if (!ar_sdio->bmi_buf) {
2033                 ret = -ENOMEM;
2034                 goto err_core_destroy;
2035         }
2036 
2037         ar_sdio->func = func;
2038         sdio_set_drvdata(func, ar_sdio);
2039 
2040         ar_sdio->is_disabled = true;
2041         ar_sdio->ar = ar;
2042 
2043         spin_lock_init(&ar_sdio->lock);
2044         spin_lock_init(&ar_sdio->wr_async_lock);
2045         mutex_init(&ar_sdio->irq_data.mtx);
2046 
2047         INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
2048         INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
2049 
2050         INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);
2051         ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
2052         if (!ar_sdio->workqueue) {
2053                 ret = -ENOMEM;
2054                 goto err_core_destroy;
2055         }
2056 
2057         for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
2058                 ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);
2059 
2060         dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, id->device);
2061         switch (dev_id_base) {
2062         case QCA_MANUFACTURER_ID_AR6005_BASE:
2063         case QCA_MANUFACTURER_ID_QCA9377_BASE:
2064                 ar->dev_id = QCA9377_1_0_DEVICE_ID;
2065                 break;
2066         default:
2067                 ret = -ENODEV;
2068                 ath10k_err(ar, "unsupported device id %u (0x%x)\n",
2069                            dev_id_base, id->device);
2070                 goto err_free_wq;
2071         }
2072 
2073         ar->id.vendor = id->vendor;
2074         ar->id.device = id->device;
2075 
2076         ath10k_sdio_set_mbox_info(ar);
2077 
2078         bus_params.dev_type = ATH10K_DEV_TYPE_HL;
2079         /* TODO: don't know yet how to get chip_id with SDIO */
2080         bus_params.chip_id = 0;
2081         bus_params.hl_msdu_ids = true;
2082 
2083         ret = ath10k_core_register(ar, &bus_params);
2084         if (ret) {
2085                 ath10k_err(ar, "failed to register driver core: %d\n", ret);
2086                 goto err_free_wq;
2087         }
2088 
2089         /* TODO: remove this once SDIO support is fully implemented */
2090         ath10k_warn(ar, "WARNING: ath10k SDIO support is work-in-progress, problems may arise!\n");
2091 
2092         return 0;
2093 
2094 err_free_wq:
2095         destroy_workqueue(ar_sdio->workqueue);
2096 err_core_destroy:
2097         ath10k_core_destroy(ar);
2098 
2099         return ret;
2100 }
2101 
2102 static void ath10k_sdio_remove(struct sdio_func *func)
2103 {
2104         struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
2105         struct ath10k *ar = ar_sdio->ar;
2106 
2107         ath10k_dbg(ar, ATH10K_DBG_BOOT,
2108                    "sdio removed func %d vendor 0x%x device 0x%x\n",
2109                    func->num, func->vendor, func->device);
2110 
2111         ath10k_core_unregister(ar);
2112         ath10k_core_destroy(ar);
2113 
2114         flush_workqueue(ar_sdio->workqueue);
2115         destroy_workqueue(ar_sdio->workqueue);
2116 }
2117 
2118 static const struct sdio_device_id ath10k_sdio_devices[] = {
2119         {SDIO_DEVICE(QCA_MANUFACTURER_CODE,
2120                      (QCA_SDIO_ID_AR6005_BASE | 0xA))},
2121         {SDIO_DEVICE(QCA_MANUFACTURER_CODE,
2122                      (QCA_SDIO_ID_QCA9377_BASE | 0x1))},
2123         {},
2124 };
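/* The SDIO core matches on QCA_MANUFACTURER_CODE plus the device ids above.
 * ath10k_sdio_probe() then extracts the manufacturer id base with
 * FIELD_GET(QCA_MANUFACTURER_ID_BASE, id->device), so both the AR6005 and
 * QCA9377 entries end up mapped to QCA9377_1_0_DEVICE_ID.
 */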
2125 
2126 MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);
2127 
2128 static struct sdio_driver ath10k_sdio_driver = {
2129         .name = "ath10k_sdio",
2130         .id_table = ath10k_sdio_devices,
2131         .probe = ath10k_sdio_probe,
2132         .remove = ath10k_sdio_remove,
2133         .drv = {
2134                 .owner = THIS_MODULE,
2135                 .pm = ATH10K_SDIO_PM_OPS,
2136         },
2137 };
2138 
2139 static int __init ath10k_sdio_init(void)
2140 {
2141         int ret;
2142 
2143         ret = sdio_register_driver(&ath10k_sdio_driver);
2144         if (ret)
2145                 pr_err("sdio driver registration failed: %d\n", ret);
2146 
2147         return ret;
2148 }
2149 
2150 static void __exit ath10k_sdio_exit(void)
2151 {
2152         sdio_unregister_driver(&ath10k_sdio_driver);
2153 }
2154 
2155 module_init(ath10k_sdio_init);
2156 module_exit(ath10k_sdio_exit);
2157 
2158 MODULE_AUTHOR("Qualcomm Atheros");
2159 MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
2160 MODULE_LICENSE("Dual BSD/GPL");