root/drivers/net/wireless/mediatek/mt76/usb.c


DEFINITIONS

This source file includes the following definitions:
  1. __mt76u_vendor_request
  2. mt76u_vendor_request
  3. __mt76u_rr
  4. mt76u_rr
  5. __mt76u_wr
  6. mt76u_wr
  7. mt76u_rmw
  8. mt76u_copy
  9. mt76u_single_wr
  10. mt76u_req_wr_rp
  11. mt76u_wr_rp
  12. mt76u_req_rd_rp
  13. mt76u_rd_rp
  14. mt76u_check_sg
  15. mt76u_set_endpoints
  16. mt76u_fill_rx_sg
  17. mt76u_refill_rx
  18. mt76u_urb_alloc
  19. mt76u_rx_urb_alloc
  20. mt76u_urb_free
  21. mt76u_fill_bulk_urb
  22. mt76u_get_next_rx_entry
  23. mt76u_get_rx_entry_len
  24. mt76u_build_rx_skb
  25. mt76u_process_rx_entry
  26. mt76u_complete_rx
  27. mt76u_submit_rx_buf
  28. mt76u_rx_tasklet
  29. mt76u_submit_rx_buffers
  30. mt76u_alloc_rx
  31. mt76u_free_rx
  32. mt76u_stop_rx
  33. mt76u_resume_rx
  34. mt76u_tx_tasklet
  35. mt76u_tx_status_data
  36. mt76u_complete_tx
  37. mt76u_tx_setup_buffers
  38. mt76u_tx_queue_skb
  39. mt76u_tx_kick
  40. mt76u_alloc_tx
  41. mt76u_free_tx
  42. mt76u_stop_tx
  43. mt76u_queues_deinit
  44. mt76u_alloc_queues
  45. mt76u_init

// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY   10
#define MT_VEND_REQ_TOUT_MS     300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
                                  u8 req_type, u16 val, u16 offset,
                                  void *buf, size_t len)
{
        struct usb_interface *uintf = to_usb_interface(dev->dev);
        struct usb_device *udev = interface_to_usbdev(uintf);
        unsigned int pipe;
        int i, ret;

        pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
                                       : usb_sndctrlpipe(udev, 0);
        for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
                if (test_bit(MT76_REMOVED, &dev->state))
                        return -EIO;

                ret = usb_control_msg(udev, pipe, req, req_type, val,
                                      offset, buf, len, MT_VEND_REQ_TOUT_MS);
                if (ret == -ENODEV)
                        set_bit(MT76_REMOVED, &dev->state);
                if (ret >= 0 || ret == -ENODEV)
                        return ret;
                usleep_range(5000, 10000);
        }

        dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
                req, offset, ret);
        return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
                         u8 req_type, u16 val, u16 offset,
                         void *buf, size_t len)
{
        int ret;

        mutex_lock(&dev->usb.usb_ctrl_mtx);
        ret = __mt76u_vendor_request(dev, req, req_type,
                                     val, offset, buf, len);
        trace_usb_reg_wr(dev, offset, val);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);

        return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
        struct mt76_usb *usb = &dev->usb;
        u32 data = ~0;
        u16 offset;
        int ret;
        u8 req;

        switch (addr & MT_VEND_TYPE_MASK) {
        case MT_VEND_TYPE_EEPROM:
                req = MT_VEND_READ_EEPROM;
                break;
        case MT_VEND_TYPE_CFG:
                req = MT_VEND_READ_CFG;
                break;
        default:
                req = MT_VEND_MULTI_READ;
                break;
        }
        offset = addr & ~MT_VEND_TYPE_MASK;

        ret = __mt76u_vendor_request(dev, req,
                                     USB_DIR_IN | USB_TYPE_VENDOR,
                                     0, offset, &usb->reg_val, sizeof(__le32));
        if (ret == sizeof(__le32))
                data = le32_to_cpu(usb->reg_val);
        trace_usb_reg_rr(dev, addr, data);

        return data;
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
        u32 ret;

        mutex_lock(&dev->usb.usb_ctrl_mtx);
        ret = __mt76u_rr(dev, addr);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);

        return ret;
}

/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
        struct mt76_usb *usb = &dev->usb;
        u16 offset;
        u8 req;

        switch (addr & MT_VEND_TYPE_MASK) {
        case MT_VEND_TYPE_CFG:
                req = MT_VEND_WRITE_CFG;
                break;
        default:
                req = MT_VEND_MULTI_WRITE;
                break;
        }
        offset = addr & ~MT_VEND_TYPE_MASK;

        usb->reg_val = cpu_to_le32(val);
        __mt76u_vendor_request(dev, req,
                               USB_DIR_OUT | USB_TYPE_VENDOR, 0,
                               offset, &usb->reg_val, sizeof(__le32));
        trace_usb_reg_wr(dev, addr, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
        mutex_lock(&dev->usb.usb_ctrl_mtx);
        __mt76u_wr(dev, addr, val);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

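/* Read-modify-write helper: the read and the write-back are performed
 * under a single hold of usb_ctrl_mtx, so the update cannot be torn by
 * a concurrent register access.
 */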
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
                     u32 mask, u32 val)
{
        mutex_lock(&dev->usb.usb_ctrl_mtx);
        val |= __mt76u_rr(dev, addr) & ~mask;
        __mt76u_wr(dev, addr, val);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);

        return val;
}

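/* Copy a buffer to consecutive registers, one 32-bit vendor request per
 * word; the loop stops at the first transfer that fails.
 */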
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
                       const void *data, int len)
{
        struct mt76_usb *usb = &dev->usb;
        const u32 *val = data;
        int i, ret;

        mutex_lock(&usb->usb_ctrl_mtx);
        for (i = 0; i < DIV_ROUND_UP(len, 4); i++) {
                put_unaligned(val[i], (u32 *)usb->data);
                ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
                                             USB_DIR_OUT | USB_TYPE_VENDOR,
                                             0, offset + i * 4, usb->data,
                                             sizeof(u32));
                if (ret < 0)
                        break;
        }
        mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
                     const u16 offset, const u32 val)
{
        mutex_lock(&dev->usb.usb_ctrl_mtx);
        __mt76u_vendor_request(dev, req,
                               USB_DIR_OUT | USB_TYPE_VENDOR,
                               val & 0xffff, offset, NULL, 0);
        __mt76u_vendor_request(dev, req,
                               USB_DIR_OUT | USB_TYPE_VENDOR,
                               val >> 16, offset + 2, NULL, 0);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
                const struct mt76_reg_pair *data, int len)
{
        struct mt76_usb *usb = &dev->usb;

        mutex_lock(&usb->usb_ctrl_mtx);
        while (len > 0) {
                __mt76u_wr(dev, base + data->reg, data->value);
                len--;
                data++;
        }
        mutex_unlock(&usb->usb_ctrl_mtx);

        return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
            const struct mt76_reg_pair *data, int n)
{
        if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
                return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
        else
                return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
                int len)
{
        struct mt76_usb *usb = &dev->usb;

        mutex_lock(&usb->usb_ctrl_mtx);
        while (len > 0) {
                data->value = __mt76u_rr(dev, base + data->reg);
                len--;
                data++;
        }
        mutex_unlock(&usb->usb_ctrl_mtx);

        return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
            struct mt76_reg_pair *data, int n)
{
        if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
                return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
        else
                return mt76u_req_rd_rp(dev, base, data, n);
}

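/* Scatter-gather is used only if it has not been disabled via the module
 * parameter, the host controller supports it (sg_tablesize > 0) and either
 * the bus imposes no sg constraints or the device runs at USB wireless
 * speed.
 */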
static bool mt76u_check_sg(struct mt76_dev *dev)
{
        struct usb_interface *uintf = to_usb_interface(dev->dev);
        struct usb_device *udev = interface_to_usbdev(uintf);

        return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
                (udev->bus->no_sg_constraint ||
                 udev->speed == USB_SPEED_WIRELESS));
}

static int
mt76u_set_endpoints(struct usb_interface *intf,
                    struct mt76_usb *usb)
{
        struct usb_host_interface *intf_desc = intf->cur_altsetting;
        struct usb_endpoint_descriptor *ep_desc;
        int i, in_ep = 0, out_ep = 0;

        for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
                ep_desc = &intf_desc->endpoint[i].desc;

                if (usb_endpoint_is_bulk_in(ep_desc) &&
                    in_ep < __MT_EP_IN_MAX) {
                        usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
                        in_ep++;
                } else if (usb_endpoint_is_bulk_out(ep_desc) &&
                           out_ep < __MT_EP_OUT_MAX) {
                        usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
                        out_ep++;
                }
        }

        if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
                return -EINVAL;
        return 0;
}

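/* Populate the urb scatterlist with up to @nsgs page fragments of
 * q->buf_size bytes each. Returns the number of entries filled, or
 * -ENOMEM if no fragment could be allocated.
 */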
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
                 int nsgs, gfp_t gfp)
{
        int i;

        for (i = 0; i < nsgs; i++) {
                struct page *page;
                void *data;
                int offset;

                data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
                if (!data)
                        break;

                page = virt_to_head_page(data);
                offset = data - page_address(page);
                sg_set_page(&urb->sg[i], page, q->buf_size, offset);
        }

        if (i < nsgs) {
                int j;

                for (j = nsgs; j < urb->num_sgs; j++)
                        skb_free_frag(sg_virt(&urb->sg[j]));
                urb->num_sgs = i;
        }

        urb->num_sgs = max_t(int, i, urb->num_sgs);
        urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
        sg_init_marker(urb->sg, urb->num_sgs);

        return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];

        if (dev->usb.sg_en)
                return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);

        urb->transfer_buffer_length = q->buf_size;
        urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);

        return urb->transfer_buffer ? 0 : -ENOMEM;
}

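/* The urb and its scatterlist are allocated as a single chunk: when
 * scatter-gather is enabled, the sg array lives directly behind the
 * struct urb it belongs to.
 */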
static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
                int sg_max_size)
{
        unsigned int size = sizeof(struct urb);

        if (dev->usb.sg_en)
                size += sg_max_size * sizeof(struct scatterlist);

        e->urb = kzalloc(size, GFP_KERNEL);
        if (!e->urb)
                return -ENOMEM;

        usb_init_urb(e->urb);

        if (dev->usb.sg_en)
                e->urb->sg = (struct scatterlist *)(e->urb + 1);

        return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
        int err;

        err = mt76u_urb_alloc(dev, e, MT_RX_SG_MAX_SIZE);
        if (err)
                return err;

        return mt76u_refill_rx(dev, e->urb, MT_RX_SG_MAX_SIZE,
                               GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
        int i;

        for (i = 0; i < urb->num_sgs; i++)
                skb_free_frag(sg_virt(&urb->sg[i]));

        if (urb->transfer_buffer)
                skb_free_frag(urb->transfer_buffer);

        usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
                    struct urb *urb, usb_complete_t complete_fn,
                    void *context)
{
        struct usb_interface *uintf = to_usb_interface(dev->dev);
        struct usb_device *udev = interface_to_usbdev(uintf);
        unsigned int pipe;

        if (dir == USB_DIR_IN)
                pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
        else
                pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

        urb->dev = udev;
        urb->pipe = pipe;
        urb->complete = complete_fn;
        urb->context = context;
}

static inline struct urb *
mt76u_get_next_rx_entry(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        struct urb *urb = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        if (q->queued > 0) {
                urb = q->entry[q->head].urb;
                q->head = (q->head + 1) % q->ndesc;
                q->queued--;
        }
        spin_unlock_irqrestore(&q->lock, flags);

        return urb;
}

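/* Validate the DMA header of an rx buffer: the received data must cover
 * the DMA, RXWI and FCE info headers, and the reported frame length must
 * be non-zero, fit the buffer and be 4-byte aligned. Returns the reported
 * DMA length on success or -EINVAL.
 */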
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
        u16 dma_len, min_len;

        dma_len = get_unaligned_le16(data);
        min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
                  MT_FCE_INFO_LEN;

        if (data_len < min_len || !dma_len ||
            dma_len + MT_DMA_HDR_LEN > data_len ||
            (dma_len & 0x3))
                return -EINVAL;
        return dma_len;
}

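/* Build an skb around the receive buffer. If the buffer is too small to
 * hold both the data and skb_shared_info, fall back to copying the first
 * MT_SKB_HEAD_LEN bytes into a freshly allocated skb and attaching the
 * remainder as a page fragment.
 */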
static struct sk_buff *
mt76u_build_rx_skb(void *data, int len, int buf_size)
{
        struct sk_buff *skb;

        if (SKB_WITH_OVERHEAD(buf_size) < MT_DMA_HDR_LEN + len) {
                struct page *page;

                /* slow path, not enough space for data and
                 * skb_shared_info
                 */
                skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                skb_put_data(skb, data + MT_DMA_HDR_LEN, MT_SKB_HEAD_LEN);
                data += (MT_DMA_HDR_LEN + MT_SKB_HEAD_LEN);
                page = virt_to_head_page(data);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                page, data - page_address(page),
                                len - MT_SKB_HEAD_LEN, buf_size);

                return skb;
        }

        /* fast path */
        skb = build_skb(data, buf_size);
        if (!skb)
                return NULL;

        skb_reserve(skb, MT_DMA_HDR_LEN);
        __skb_put(skb, len);

        return skb;
}

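/* Turn a completed rx urb into an skb: parse and validate the first
 * segment, then chain any further scatterlist segments as paged fragments
 * before handing the skb to the driver rx handler. Returns the number of
 * rx buffers consumed.
 */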
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
        int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
        int len, nsgs = 1;
        struct sk_buff *skb;

        if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
                return 0;

        len = mt76u_get_rx_entry_len(data, urb->actual_length);
        if (len < 0)
                return 0;

        data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
        skb = mt76u_build_rx_skb(data, data_len, q->buf_size);
        if (!skb)
                return 0;

        len -= data_len;
        while (len > 0 && nsgs < urb->num_sgs) {
                data_len = min_t(int, len, urb->sg[nsgs].length);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                sg_page(&urb->sg[nsgs]),
                                urb->sg[nsgs].offset,
                                data_len, q->buf_size);
                len -= data_len;
                nsgs++;
        }
        dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

        return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
        struct mt76_dev *dev = urb->context;
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        unsigned long flags;

        trace_rx_urb(dev, urb);

        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
                return;
        default:
                dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
                                    urb->status);
                /* fall through */
        case 0:
                break;
        }

        spin_lock_irqsave(&q->lock, flags);
        if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
                goto out;

        q->tail = (q->tail + 1) % q->ndesc;
        q->queued++;
        tasklet_schedule(&dev->usb.rx_tasklet);
out:
        spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
{
        mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
                            mt76u_complete_rx, dev);
        trace_submit_urb(dev, urb);

        return usb_submit_urb(urb, GFP_ATOMIC);
}

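/* Bottom half for rx completions: drain all completed urbs from the rx
 * queue, refill the buffers that were handed to the driver rx handler and
 * resubmit each urb to the hardware.
 */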
static void mt76u_rx_tasklet(unsigned long data)
{
        struct mt76_dev *dev = (struct mt76_dev *)data;
        struct urb *urb;
        int err, count;

        rcu_read_lock();

        while (true) {
                urb = mt76u_get_next_rx_entry(dev);
                if (!urb)
                        break;

                count = mt76u_process_rx_entry(dev, urb);
                if (count > 0) {
                        err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
                        if (err < 0)
                                break;
                }
                mt76u_submit_rx_buf(dev, urb);
        }
        mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

        rcu_read_unlock();
}

static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        unsigned long flags;
        int i, err = 0;

        spin_lock_irqsave(&q->lock, flags);
        for (i = 0; i < q->ndesc; i++) {
                err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
                if (err < 0)
                        break;
        }
        q->head = q->tail = 0;
        q->queued = 0;
        spin_unlock_irqrestore(&q->lock, flags);

        return err;
}

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
        struct mt76_usb *usb = &dev->usb;
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        int i, err;

        usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
        if (!usb->mcu.data)
                return -ENOMEM;

        spin_lock_init(&q->lock);
        q->entry = devm_kcalloc(dev->dev,
                                MT_NUM_RX_ENTRIES, sizeof(*q->entry),
                                GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;

        q->ndesc = MT_NUM_RX_ENTRIES;
        q->buf_size = PAGE_SIZE;

        for (i = 0; i < q->ndesc; i++) {
                err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
                if (err < 0)
                        return err;
        }

        return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        struct page *page;
        int i;

        for (i = 0; i < q->ndesc; i++)
                mt76u_urb_free(q->entry[i].urb);

        if (!q->rx_page.va)
                return;

        page = virt_to_page(q->rx_page.va);
        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
        memset(&q->rx_page, 0, sizeof(q->rx_page));
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        int i;

        for (i = 0; i < q->ndesc; i++)
                usb_poison_urb(q->entry[i].urb);

        tasklet_kill(&dev->usb.rx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        int i;

        for (i = 0; i < q->ndesc; i++)
                usb_unpoison_urb(q->entry[i].urb);

        return mt76u_submit_rx_buffers(dev);
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);

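/* Bottom half for tx completions: for every AC queue, reap the entries
 * whose urbs have completed, report them to the driver, restart queues
 * that were stopped on overflow and kick off the tx status work.
 */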
static void mt76u_tx_tasklet(unsigned long data)
{
        struct mt76_dev *dev = (struct mt76_dev *)data;
        struct mt76_queue_entry entry;
        struct mt76_sw_queue *sq;
        struct mt76_queue *q;
        bool wake;
        int i;

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                u32 n_dequeued = 0, n_sw_dequeued = 0;

                sq = &dev->q_tx[i];
                q = sq->q;

                while (q->queued > n_dequeued) {
                        if (!q->entry[q->head].done)
                                break;

                        if (q->entry[q->head].schedule) {
                                q->entry[q->head].schedule = false;
                                n_sw_dequeued++;
                        }

                        entry = q->entry[q->head];
                        q->entry[q->head].done = false;
                        q->head = (q->head + 1) % q->ndesc;
                        n_dequeued++;

                        dev->drv->tx_complete_skb(dev, i, &entry);
                }

                spin_lock_bh(&q->lock);

                sq->swq_queued -= n_sw_dequeued;
                q->queued -= n_dequeued;

                wake = q->stopped && q->queued < q->ndesc - 8;
                if (wake)
                        q->stopped = false;

                if (!q->queued)
                        wake_up(&dev->tx_wait);

                spin_unlock_bh(&q->lock);

                mt76_txq_schedule(dev, i);

                if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
                        ieee80211_queue_delayed_work(dev->hw,
                                                     &dev->usb.stat_work,
                                                     msecs_to_jiffies(10));

                if (wake)
                        ieee80211_wake_queue(dev->hw, i);
        }
}

static void mt76u_tx_status_data(struct work_struct *work)
{
        struct mt76_usb *usb;
        struct mt76_dev *dev;
        u8 update = 1;
        u16 count = 0;

        usb = container_of(work, struct mt76_usb, stat_work.work);
        dev = container_of(usb, struct mt76_dev, usb);

        while (true) {
                if (test_bit(MT76_REMOVED, &dev->state))
                        break;

                if (!dev->drv->tx_status_data(dev, &update))
                        break;
                count++;
        }

        if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
                ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
                                             msecs_to_jiffies(10));
        else
                clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
        struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
        struct mt76_queue_entry *e = urb->context;

        if (mt76u_urb_error(urb))
                dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
        e->done = true;

        tasklet_schedule(&dev->tx_tasklet);
}

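/* Attach the skb payload to a tx urb: a plain linear buffer when
 * scatter-gather is disabled, otherwise an sg vector built with
 * skb_to_sgvec().
 */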
static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
                       struct urb *urb)
{
        urb->transfer_buffer_length = skb->len;

        if (!dev->usb.sg_en) {
                urb->transfer_buffer = skb->data;
                return 0;
        }

        sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
        urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
        if (!urb->num_sgs)
                return -ENOMEM;

        return urb->num_sgs;
}

static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
                   struct sk_buff *skb, struct mt76_wcid *wcid,
                   struct ieee80211_sta *sta)
{
        struct mt76_queue *q = dev->q_tx[qid].q;
        struct mt76_tx_info tx_info = {
                .skb = skb,
        };
        u16 idx = q->tail;
        int err;

        if (q->queued == q->ndesc)
                return -ENOSPC;

        skb->prev = skb->next = NULL;
        err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
        if (err < 0)
                return err;

        err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
        if (err < 0)
                return err;

        mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
                            q->entry[idx].urb, mt76u_complete_tx,
                            &q->entry[idx]);

        q->tail = (q->tail + 1) % q->ndesc;
        q->entry[idx].skb = tx_info.skb;
        q->queued++;

        return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
        struct urb *urb;
        int err;

        while (q->first != q->tail) {
                urb = q->entry[q->first].urb;

                trace_submit_urb(dev, urb);
                err = usb_submit_urb(urb, GFP_ATOMIC);
                if (err < 0) {
                        if (err == -ENODEV)
                                set_bit(MT76_REMOVED, &dev->state);
                        else
                                dev_err(dev->dev, "tx urb submit failed:%d\n",
                                        err);
                        break;
                }
                q->first = (q->first + 1) % q->ndesc;
        }
}

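/* Allocate one hardware queue per access category; the remaining software
 * queues (up to MT_TXQ_PSD) share the queue of the first AC.
 */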
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
        struct mt76_queue *q;
        int i, j, err;

        for (i = 0; i <= MT_TXQ_PSD; i++) {
                INIT_LIST_HEAD(&dev->q_tx[i].swq);

                if (i >= IEEE80211_NUM_ACS) {
                        dev->q_tx[i].q = dev->q_tx[0].q;
                        continue;
                }

                q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
                if (!q)
                        return -ENOMEM;

                spin_lock_init(&q->lock);
                q->hw_idx = mt76_ac_to_hwq(i);
                dev->q_tx[i].q = q;

                q->entry = devm_kcalloc(dev->dev,
                                        MT_NUM_TX_ENTRIES, sizeof(*q->entry),
                                        GFP_KERNEL);
                if (!q->entry)
                        return -ENOMEM;

                q->ndesc = MT_NUM_TX_ENTRIES;
                for (j = 0; j < q->ndesc; j++) {
                        err = mt76u_urb_alloc(dev, &q->entry[j],
                                              MT_TX_SG_MAX_SIZE);
                        if (err < 0)
                                return err;
                }
        }
        return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
        struct mt76_queue *q;
        int i, j;

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                q = dev->q_tx[i].q;
                for (j = 0; j < q->ndesc; j++)
                        usb_free_urb(q->entry[j].urb);
        }
}

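/* Stop tx and wait briefly for in-flight frames to drain; on timeout,
 * kill the pending urbs and the tx tasklet and complete any queued skbs
 * by hand so no entries are leaked.
 */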
void mt76u_stop_tx(struct mt76_dev *dev)
{
        struct mt76_queue_entry entry;
        struct mt76_queue *q;
        int i, j, ret;

        ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev),
                                 HZ / 5);
        if (!ret) {
                dev_err(dev->dev, "timed out waiting for pending tx\n");

                for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                        q = dev->q_tx[i].q;
                        for (j = 0; j < q->ndesc; j++)
                                usb_kill_urb(q->entry[j].urb);
                }

                tasklet_kill(&dev->tx_tasklet);

                /* On device removal we might queue skbs, but mt76u_tx_kick()
                 * will fail to submit the urb; clean up those skbs manually.
                 */
                for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                        q = dev->q_tx[i].q;

                        /* Ensure we are in sync with the killed tasklet. */
                        spin_lock_bh(&q->lock);
                        while (q->queued) {
                                entry = q->entry[q->head];
                                q->head = (q->head + 1) % q->ndesc;
                                q->queued--;

                                dev->drv->tx_complete_skb(dev, i, &entry);
                        }
                        spin_unlock_bh(&q->lock);
                }
        }

        cancel_delayed_work_sync(&dev->usb.stat_work);
        clear_bit(MT76_READING_STATS, &dev->state);

        mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
        mt76u_stop_rx(dev);
        mt76u_stop_tx(dev);

        mt76u_free_rx(dev);
        mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
        int err;

        err = mt76u_alloc_rx(dev);
        if (err < 0)
                return err;

        return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
        .tx_queue_skb = mt76u_tx_queue_skb,
        .kick = mt76u_tx_kick,
};

int mt76u_init(struct mt76_dev *dev,
               struct usb_interface *intf)
{
        static const struct mt76_bus_ops mt76u_ops = {
                .rr = mt76u_rr,
                .wr = mt76u_wr,
                .rmw = mt76u_rmw,
                .write_copy = mt76u_copy,
                .wr_rp = mt76u_wr_rp,
                .rd_rp = mt76u_rd_rp,
                .type = MT76_BUS_USB,
        };
        struct usb_device *udev = interface_to_usbdev(intf);
        struct mt76_usb *usb = &dev->usb;

        tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
        tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
        INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
        skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

        mutex_init(&usb->mcu.mutex);

        mutex_init(&usb->usb_ctrl_mtx);
        dev->bus = &mt76u_ops;
        dev->queue_ops = &usb_queue_ops;

        dev_set_drvdata(&udev->dev, dev);

        usb->sg_en = mt76u_check_sg(dev);

        return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
