drivers/net/wimax/i2400m/usb-rx.c


DEFINITIONS

This source file includes the following definitions.
  1. i2400mu_rx_size_grow
  2. i2400mu_rx_size_maybe_shrink
  3. i2400mu_rx
  4. i2400mu_rxd
  5. i2400mu_rx_kick
  6. i2400mu_rx_setup
  7. i2400mu_rx_release

   1 /*
   2  * Intel Wireless WiMAX Connection 2400m
   3  * USB RX handling
   4  *
   5  *
   6  * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
   7  *
   8  * Redistribution and use in source and binary forms, with or without
   9  * modification, are permitted provided that the following conditions
  10  * are met:
  11  *
  12  *   * Redistributions of source code must retain the above copyright
  13  *     notice, this list of conditions and the following disclaimer.
  14  *   * Redistributions in binary form must reproduce the above copyright
  15  *     notice, this list of conditions and the following disclaimer in
  16  *     the documentation and/or other materials provided with the
  17  *     distribution.
  18  *   * Neither the name of Intel Corporation nor the names of its
  19  *     contributors may be used to endorse or promote products derived
  20  *     from this software without specific prior written permission.
  21  *
  22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  25  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  26  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  28  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  32  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  33  *
  34  *
  35  * Intel Corporation <linux-wimax@intel.com>
  36  * Yanir Lubetkin <yanirx.lubetkin@intel.com>
  37  *  - Initial implementation
  38  * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
  39  *  - Use skb_clone(), break up processing in chunks
  40  *  - Split transport/device specific
  41  *  - Make buffer size dynamic to exert less memory pressure
  42  *
  43  *
  44  * This handles the RX path on USB.
  45  *
  46  * When a notification is received that says 'there is RX data ready',
  47  * we call i2400mu_rx_kick(); that wakes up the RX kthread, which
  48  * reads a buffer from USB and passes it to i2400m_rx() in the generic
   49  * handling code. The RX buffer has a specific format that is
  50  * described in rx.c.
  51  *
  52  * We use a kernel thread in a loop because:
  53  *
  54  *  - we want to be able to call the USB power management get/put
  55  *    functions (blocking) before each transaction.
  56  *
  57  *  - We might get a lot of notifications and we don't want to submit
  58  *    a zillion reads; by serializing, we are throttling.
  59  *
   60  *  - RX data processing can get heavy enough that it is not
   61  *    appropriate to do in the USB callback; thus we run it in
   62  *    process context.
  63  *
  64  * We provide a read buffer of an arbitrary size (short of a page); if
  65  * the callback reports -EOVERFLOW, it means it was too small, so we
  66  * just double the size and retry (being careful to append, as
   67  * sometimes the device provided some data). Every now and then we
   68  * check if the average message size is smaller than half the current
   69  * buffer size and, if so, we halve it. Over time, the size of the
   70  * preallocated buffer should track the average received
   71  * transaction size, adapting dynamically to it.
  72  *
  73  * ROADMAP
  74  *
  75  * i2400mu_rx_kick()               Called from notif.c when we get a
  76  *                                 'data ready' notification
  77  * i2400mu_rxd()                   Kernel RX daemon
  78  *   i2400mu_rx()                  Receive USB data
  79  *   i2400m_rx()                   Send data to generic i2400m RX handling
  80  *
  81  * i2400mu_rx_setup()              called from i2400mu_bus_dev_start()
  82  *
  83  * i2400mu_rx_release()            called from i2400mu_bus_dev_stop()
  84  */
  85 #include <linux/workqueue.h>
  86 #include <linux/slab.h>
  87 #include <linux/usb.h>
  88 #include "i2400m-usb.h"
  89 
  90 
  91 #define D_SUBMODULE rx
  92 #include "usb-debug-levels.h"
  93 
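/*
 * A minimal sketch of the kick/wait pattern described in the header
 * comment. Assumption: this simplified consumer loop is illustrative
 * only and is not part of the driver; the real producer is
 * i2400mu_rx_kick() and the real consumer is i2400mu_rxd(), both
 * further down in this file.
 */
static int __maybe_unused i2400mu_rx_loop_sketch(struct i2400mu *i2400mu)
{
	for (;;) {
		wait_event_interruptible(
			i2400mu->rx_wq,
			kthread_should_stop()	/* check this first! */
			|| atomic_read(&i2400mu->rx_pending_count));
		if (kthread_should_stop())
			return 0;
		if (atomic_read(&i2400mu->rx_pending_count) == 0)
			continue;
		/* read one message from USB and hand it to i2400m_rx() */
		atomic_dec(&i2400mu->rx_pending_count);
	}
}
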
  94 /*
  95  * Dynamic RX size
  96  *
   97  * We can't let rx_size be a multiple of 512 bytes (the RX
   98  * endpoint's max packet size). On some USB host controllers (we
   99  * haven't been able to fully characterize which), if the device is
  100  * about to send (for example) X bytes and we only post a buffer of
  101  * n*512 bytes to receive them, the controller will fail to flag the
  102  * transfer as babble, and then i2400mu_rx() [case -EOVERFLOW] never
  103  * gets the chance to resize the buffer and read the rest.
 104  *
  105  * So on growing or shrinking, if the size is a multiple of the
  106  * max packet size, we remove a few bytes (instead of adding some, so
  107  * that with a buddy allocator we try to waste less space).
 108  *
 109  * Note we also need a hook for this on i2400mu_rx() -- when we do the
 110  * first read, we are sure we won't hit this spot because
  111  * i2400mu->rx_size has been set properly. However, if we have to
 112  * double because of -EOVERFLOW, when we launch the read to get the
 113  * rest of the data, we *have* to make sure that also is not a
 114  * multiple of the max_pkt_size.
 115  */
 116 
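/*
 * Illustrative sketch (the helper name is hypothetical and it is not
 * used by the driver): the "never post a buffer that is an exact
 * multiple of the bulk-in max packet size" rule described above,
 * folded into one place. For example, doubling a 1024 byte buffer
 * gives 2048 (4 * 512), which is then trimmed to 2040. The -8
 * adjustment mirrors the two functions below.
 */
static inline size_t i2400mu_rx_size_avoid_boundary(size_t rx_size)
{
	const size_t max_pkt_size = 512;

	if (rx_size % max_pkt_size == 0)
		rx_size -= 8;
	return rx_size;
}
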
 117 static
 118 size_t i2400mu_rx_size_grow(struct i2400mu *i2400mu)
 119 {
 120         struct device *dev = &i2400mu->usb_iface->dev;
 121         size_t rx_size;
 122         const size_t max_pkt_size = 512;
 123 
 124         rx_size = 2 * i2400mu->rx_size;
 125         if (rx_size % max_pkt_size == 0) {
 126                 rx_size -= 8;
 127                 d_printf(1, dev,
 128                          "RX: expected size grew to %zu [adjusted -8] "
 129                          "from %zu\n",
 130                          rx_size, i2400mu->rx_size);
 131         } else
 132                 d_printf(1, dev,
 133                          "RX: expected size grew to %zu from %zu\n",
 134                          rx_size, i2400mu->rx_size);
 135         return rx_size;
 136 }
 137 
 138 
 139 static
 140 void i2400mu_rx_size_maybe_shrink(struct i2400mu *i2400mu)
 141 {
 142         const size_t max_pkt_size = 512;
 143         struct device *dev = &i2400mu->usb_iface->dev;
 144 
 145         if (unlikely(i2400mu->rx_size_cnt >= 100
 146                      && i2400mu->rx_size_auto_shrink)) {
 147                 size_t avg_rx_size =
 148                         i2400mu->rx_size_acc / i2400mu->rx_size_cnt;
 149                 size_t new_rx_size = i2400mu->rx_size / 2;
 150                 if (avg_rx_size < new_rx_size) {
 151                         if (new_rx_size % max_pkt_size == 0) {
 152                                 new_rx_size -= 8;
 153                                 d_printf(1, dev,
 154                                          "RX: expected size shrank to %zu "
 155                                          "[adjusted -8] from %zu\n",
 156                                          new_rx_size, i2400mu->rx_size);
 157                         } else
 158                                 d_printf(1, dev,
 159                                          "RX: expected size shrank to %zu "
 160                                          "from %zu\n",
 161                                          new_rx_size, i2400mu->rx_size);
 162                         i2400mu->rx_size = new_rx_size;
 163                         i2400mu->rx_size_cnt = 0;
 164                         i2400mu->rx_size_acc = i2400mu->rx_size;
 165                 }
 166         }
 167 }
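
/*
 * For reference, a minimal sketch (hypothetical helper, not used by
 * the driver) of the bookkeeping that feeds the shrink heuristic
 * above: every delivered message bumps rx_size_cnt and accumulates
 * its length in rx_size_acc, so that after ~100 messages the average
 * can be compared against half of the current buffer size. The real
 * driver does this inline in i2400mu_rxd() below.
 */
static inline void i2400mu_rx_size_account(struct i2400mu *i2400mu,
					   size_t rx_len)
{
	i2400mu->rx_size_cnt++;
	i2400mu->rx_size_acc += rx_len;
}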
 168 
 169 /*
 170  * Receive a message with payloads from the USB bus into an skb
 171  *
 172  * @i2400mu: USB device descriptor
 173  * @rx_skb: skb where to place the received message
 174  *
  175  * Deals with all the USB specifics of receiving, dynamically
  176  * increasing the buffer size if needed. Returns the payload in the
  177  * skb, ready to process. On a zero-length packet, we retry.
  178  *
  179  * On soft USB errors, we retry (until they become too frequent and
  180  * are promoted to hard errors); on hard USB errors, we reset the
  181  * device. On other errors (e.g., a failed skb reallocation), we just
  182  * drop the message and hope the next invocation solves it.
 183  *
 184  * Returns: pointer to the skb if ok, ERR_PTR on error.
 185  *   NOTE: this function might realloc the skb (if it is too small),
 186  *   so always update with the one returned.
  187  *   On error, the return is an ERR_PTR() encoding a negative errno.
 188  *   Will return NULL if it cannot reallocate -- this can be
 189  *   considered a transient retryable error.
 190  */
 191 static
 192 struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
 193 {
 194         int result = 0;
 195         struct device *dev = &i2400mu->usb_iface->dev;
 196         int usb_pipe, read_size, rx_size, do_autopm;
 197         struct usb_endpoint_descriptor *epd;
 198         const size_t max_pkt_size = 512;
 199 
 200         d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
 201         do_autopm = atomic_read(&i2400mu->do_autopm);
 202         result = do_autopm ?
 203                 usb_autopm_get_interface(i2400mu->usb_iface) : 0;
 204         if (result < 0) {
 205                 dev_err(dev, "RX: can't get autopm: %d\n", result);
 206                 do_autopm = 0;
 207         }
 208         epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
 209         usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
 210 retry:
 211         rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
 212         if (unlikely(rx_size % max_pkt_size == 0)) {
 213                 rx_size -= 8;
 214                 d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size);
 215         }
 216         result = usb_bulk_msg(
 217                 i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
 218                 rx_size, &read_size, 200);
 219         usb_mark_last_busy(i2400mu->usb_dev);
 220         switch (result) {
 221         case 0:
 222                 if (read_size == 0)
 223                         goto retry;     /* ZLP, just resubmit */
 224                 skb_put(rx_skb, read_size);
 225                 break;
 226         case -EPIPE:
  227                 /*
  228                  * Stall -- maybe the device is choking on our
  229                  * requests. Clear it and give it some time. If stalls
  230                  * happen too often, it might be a symptom of something
  231                  * else, so we reset.
  232                  *
  233                  * No error handling for usb_clear_halt(); if it
  234                  * works, the retry works; if it fails, this switch
  235                  * does the error handling for us.
  236                  */
 237                 if (edc_inc(&i2400mu->urb_edc,
 238                             10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
  239                         dev_err(dev, "RX: too many stalls in "
 240                                 "URB; resetting device\n");
 241                         goto do_reset;
 242                 }
 243                 usb_clear_halt(i2400mu->usb_dev, usb_pipe);
 244                 msleep(10);     /* give the device some time */
 245                 goto retry;
 246         case -EINVAL:                   /* while removing driver */
 247         case -ENODEV:                   /* dev disconnect ... */
 248         case -ENOENT:                   /* just ignore it */
 249         case -ESHUTDOWN:
 250         case -ECONNRESET:
 251                 break;
 252         case -EOVERFLOW: {              /* too small, reallocate */
 253                 struct sk_buff *new_skb;
 254                 rx_size = i2400mu_rx_size_grow(i2400mu);
 255                 if (rx_size <= (1 << 16))       /* cap it */
 256                         i2400mu->rx_size = rx_size;
 257                 else if (printk_ratelimit()) {
 258                         dev_err(dev, "BUG? rx_size up to %d\n", rx_size);
 259                         result = -EINVAL;
 260                         goto out;
 261                 }
 262                 skb_put(rx_skb, read_size);
 263                 new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len,
 264                                           GFP_KERNEL);
 265                 if (new_skb == NULL) {
 266                         kfree_skb(rx_skb);
 267                         rx_skb = NULL;
 268                         goto out;       /* drop it...*/
 269                 }
 270                 kfree_skb(rx_skb);
 271                 rx_skb = new_skb;
 272                 i2400mu->rx_size_cnt = 0;
 273                 i2400mu->rx_size_acc = i2400mu->rx_size;
 274                 d_printf(1, dev, "RX: size changed to %d, received %d, "
 275                          "copied %d, capacity %ld\n",
 276                          rx_size, read_size, rx_skb->len,
 277                          (long) skb_end_offset(new_skb));
 278                 goto retry;
 279         }
  280                 /* In most cases, a timeout happens because the hardware
  281                  * scheduled a read when there was no data - unfortunately,
  282                  * we have no way to tell this apart from a real USB timeout,
  283                  * so we just ignore it. */
 284         case -ETIMEDOUT:
 285                 dev_err(dev, "RX: timeout: %d\n", result);
 286                 result = 0;
 287                 break;
 288         default:                        /* Any error */
 289                 if (edc_inc(&i2400mu->urb_edc,
 290                             EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
 291                         goto error_reset;
 292                 dev_err(dev, "RX: error receiving URB: %d, retrying\n", result);
 293                 goto retry;
 294         }
 295 out:
 296         if (do_autopm)
 297                 usb_autopm_put_interface(i2400mu->usb_iface);
 298         d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb);
 299         return rx_skb;
 300 
 301 error_reset:
 302         dev_err(dev, "RX: maximum errors in URB exceeded; "
 303                 "resetting device\n");
 304 do_reset:
 305         usb_queue_reset_device(i2400mu->usb_iface);
 306         rx_skb = ERR_PTR(result);
 307         goto out;
 308 }
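
/*
 * Caller-side sketch (illustrative only; this hypothetical wrapper is
 * not part of the driver, whose real caller is i2400mu_rxd() below):
 * i2400mu_rx() may hand back a different skb than the one passed in,
 * an ERR_PTR() on hard errors, or NULL on a transient allocation
 * failure, so a caller has to check all three cases.
 */
static int __maybe_unused i2400mu_rx_one_sketch(struct i2400mu *i2400mu,
						struct sk_buff *skb)
{
	skb = i2400mu_rx(i2400mu, skb);	/* may reallocate the skb */
	if (IS_ERR(skb))
		return PTR_ERR(skb);	/* hard error; device reset already queued */
	if (skb == NULL)
		return -ENOMEM;		/* transient allocation failure; retry later */
	/* skb->len bytes are ready; the real code hands the skb to i2400m_rx() */
	kfree_skb(skb);
	return 0;
}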
 309 
 310 
 311 /*
 312  * Kernel thread for USB reception of data
 313  *
 314  * This thread waits for a kick; once kicked, it will allocate an skb
 315  * and receive a single message to it from USB (using
 316  * i2400mu_rx()). Once received, it is passed to the generic i2400m RX
 317  * code for processing.
 318  *
  319  * When done processing, it runs some rough statistics to check
  320  * whether the average size of the last 100 received messages is
  321  * smaller than half of the current RX buffer size; in that case,
  322  * the RX buffer size is halved. This helps lower the pressure on
  323  * the memory allocator.
 324  *
 325  * Hard errors force the thread to exit.
 326  */
 327 static
 328 int i2400mu_rxd(void *_i2400mu)
 329 {
 330         int result = 0;
 331         struct i2400mu *i2400mu = _i2400mu;
 332         struct i2400m *i2400m = &i2400mu->i2400m;
 333         struct device *dev = &i2400mu->usb_iface->dev;
 334         struct net_device *net_dev = i2400m->wimax_dev.net_dev;
 335         size_t pending;
 336         int rx_size;
 337         struct sk_buff *rx_skb;
 338         unsigned long flags;
 339 
 340         d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
 341         spin_lock_irqsave(&i2400m->rx_lock, flags);
 342         BUG_ON(i2400mu->rx_kthread != NULL);
 343         i2400mu->rx_kthread = current;
 344         spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 345         while (1) {
 346                 d_printf(2, dev, "RX: waiting for messages\n");
 347                 pending = 0;
 348                 wait_event_interruptible(
 349                         i2400mu->rx_wq,
 350                         (kthread_should_stop()  /* check this first! */
 351                          || (pending = atomic_read(&i2400mu->rx_pending_count)))
 352                         );
 353                 if (kthread_should_stop())
 354                         break;
 355                 if (pending == 0)
 356                         continue;
 357                 rx_size = i2400mu->rx_size;
 358                 d_printf(2, dev, "RX: reading up to %d bytes\n", rx_size);
 359                 rx_skb = __netdev_alloc_skb(net_dev, rx_size, GFP_KERNEL);
 360                 if (rx_skb == NULL) {
 361                         dev_err(dev, "RX: can't allocate skb [%d bytes]\n",
 362                                 rx_size);
 363                         msleep(50);     /* give it some time? */
 364                         continue;
 365                 }
 366 
 367                 /* Receive the message with the payloads */
 368                 rx_skb = i2400mu_rx(i2400mu, rx_skb);
 369                 result = PTR_ERR(rx_skb);
 370                 if (IS_ERR(rx_skb))
 371                         goto out;
 372                 atomic_dec(&i2400mu->rx_pending_count);
 373                 if (rx_skb == NULL || rx_skb->len == 0) {
 374                         /* some "ignorable" condition */
 375                         kfree_skb(rx_skb);
 376                         continue;
 377                 }
 378 
 379                 /* Deliver the message to the generic i2400m code */
 380                 i2400mu->rx_size_cnt++;
 381                 i2400mu->rx_size_acc += rx_skb->len;
 382                 result = i2400m_rx(i2400m, rx_skb);
 383                 if (result == -EIO
 384                     && edc_inc(&i2400mu->urb_edc,
 385                                EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
 386                         goto error_reset;
 387                 }
 388 
 389                 /* Maybe adjust RX buffer size */
 390                 i2400mu_rx_size_maybe_shrink(i2400mu);
 391         }
 392         result = 0;
 393 out:
 394         spin_lock_irqsave(&i2400m->rx_lock, flags);
 395         i2400mu->rx_kthread = NULL;
 396         spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 397         d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
 398         return result;
 399 
 400 error_reset:
 401         dev_err(dev, "RX: maximum errors in received buffer exceeded; "
 402                 "resetting device\n");
 403         usb_queue_reset_device(i2400mu->usb_iface);
 404         goto out;
 405 }
 406 
 407 
 408 /*
 409  * Start reading from the device
 410  *
 411  * @i2400m: device instance
 412  *
 413  * Notify the RX thread that there is data pending.
 414  */
 415 void i2400mu_rx_kick(struct i2400mu *i2400mu)
 416 {
 417         struct i2400m *i2400m = &i2400mu->i2400m;
 418         struct device *dev = &i2400mu->usb_iface->dev;
 419 
 420         d_fnstart(3, dev, "(i2400mu %p)\n", i2400m);
 421         atomic_inc(&i2400mu->rx_pending_count);
 422         wake_up_all(&i2400mu->rx_wq);
 423         d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
 424 }
 425 
 426 
 427 int i2400mu_rx_setup(struct i2400mu *i2400mu)
 428 {
 429         int result = 0;
 430         struct i2400m *i2400m = &i2400mu->i2400m;
 431         struct device *dev = &i2400mu->usb_iface->dev;
 432         struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
 433         struct task_struct *kthread;
 434 
 435         kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
 436                               wimax_dev->name);
  437         /* the kthread function sets i2400mu->rx_kthread */
 438         if (IS_ERR(kthread)) {
 439                 result = PTR_ERR(kthread);
 440                 dev_err(dev, "RX: cannot start thread: %d\n", result);
 441         }
 442         return result;
 443 }
 444 
 445 
 446 void i2400mu_rx_release(struct i2400mu *i2400mu)
 447 {
 448         unsigned long flags;
 449         struct i2400m *i2400m = &i2400mu->i2400m;
 450         struct device *dev = i2400m_dev(i2400m);
 451         struct task_struct *kthread;
 452 
 453         spin_lock_irqsave(&i2400m->rx_lock, flags);
 454         kthread = i2400mu->rx_kthread;
 455         i2400mu->rx_kthread = NULL;
 456         spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 457         if (kthread)
 458                 kthread_stop(kthread);
 459         else
 460                 d_printf(1, dev, "RX: kthread had already exited\n");
 461 }
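
/*
 * Lifetime sketch (hypothetical function, not part of the driver; per
 * the header comment the real call sites are i2400mu_bus_dev_start()
 * and i2400mu_bus_dev_stop()): i2400mu_rx_setup() spawns the RX
 * kthread when the device is brought up and i2400mu_rx_release()
 * stops it on the way down, so the thread's lifetime matches the
 * device's.
 */
static int __maybe_unused i2400mu_rx_lifetime_sketch(struct i2400mu *i2400mu)
{
	int result;

	result = i2400mu_rx_setup(i2400mu);	/* spawns the RX kthread */
	if (result < 0)
		return result;
	/* ... device operates; i2400mu_rx_kick() wakes the thread as needed ... */
	i2400mu_rx_release(i2400mu);		/* stops the kthread */
	return 0;
}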
 462 
