This source file includes the following definitions:
- wa_seg_init
- wa_xfer_init
- wa_xfer_destroy
- wa_xfer_get
- wa_xfer_put
- __wa_dto_try_get
- __wa_dto_put
- wa_check_for_delayed_rpipes
- wa_add_delayed_rpipe
- wa_xfer_giveback
- wa_xfer_completion
- wa_xfer_id_init
- wa_xfer_id
- wa_xfer_id_le32
- __wa_xfer_is_done
- __wa_xfer_mark_seg_as_done
- wa_xfer_get_by_id
- __wa_xfer_abort_cb
- __wa_xfer_abort
- __wa_seg_calculate_isoc_frame_count
- __wa_xfer_setup_sizes
- __wa_setup_isoc_packet_descr
- __wa_xfer_setup_hdr0
- wa_seg_dto_cb
- wa_seg_iso_pack_desc_cb
- wa_seg_tr_cb
- wa_xfer_create_subset_sg
- __wa_populate_dto_urb_isoc
- __wa_populate_dto_urb
- __wa_xfer_setup_segs
- __wa_xfer_setup
- __wa_seg_submit
- __wa_xfer_delayed_run
- wa_xfer_delayed_run
- __wa_xfer_submit
- wa_urb_enqueue_b
- wa_urb_enqueue_run
- wa_process_errored_transfers_run
- wa_urb_enqueue
- wa_urb_dequeue
- wa_xfer_status_to_errno
- wa_complete_remaining_xfer_segs
- __wa_populate_buf_in_urb_isoc
- wa_populate_buf_in_urb
- wa_xfer_result_chew
- wa_process_iso_packet_status
- wa_buf_in_cb
- wa_dti_cb
- wa_dti_start
- wa_handle_notif_xfer
/*
 * WUSB Wire Adapter
 * Data transfer and URB enqueuing
 *
 * How transfers work: the URB's data buffer is broken up into segments
 * of at most seg_size bytes each (see __wa_xfer_setup_sizes()).  For
 * every segment, a transfer request header is sent to the wire
 * adapter's Data Transfer Out (DTO) endpoint, followed for outbound
 * transfers by the segment payload.  Transfer results and inbound data
 * arrive on the Data Transfer In (DTI) endpoint and are matched to
 * their transfer by ID (see wa_xfer_id()).  Segments that cannot be
 * submitted immediately are parked on their RPIPE's seg_list and
 * resubmitted later by wa_xfer_delayed_run().
 */
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/scatterlist.h>

#include "wa-hc.h"
#include "wusbhc.h"

enum {
        /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
        WA_SEGS_MAX = 128,
};
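
/*
 * Note: the 128-segment limit follows from the request format used
 * below: bit 7 of bTransferSegment marks the last segment of a transfer
 * (see the "bTransferSegment |= 0x80" in __wa_xfer_setup()), leaving
 * 7 bits for the segment index, i.e. at most 2^7 = 128 segments.
 */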

enum wa_seg_status {
        WA_SEG_NOTREADY,
        WA_SEG_READY,
        WA_SEG_DELAYED,
        WA_SEG_SUBMITTED,
        WA_SEG_PENDING,
        WA_SEG_DTI_PENDING,
        WA_SEG_DONE,
        WA_SEG_ERROR,
        WA_SEG_ABORTED,
};

static void wa_xfer_delayed_run(struct wa_rpipe *);
static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);

/*
 * The life cycle of a segment is governed by the URB embedded at its
 * head: usb_free_urb(&seg->tr_urb) releases the whole struct wa_seg.
 */
struct wa_seg {
        struct urb tr_urb;              /* transfer request URB. */
        struct urb *isoc_pack_desc_urb; /* for isoc packet descriptor. */
        struct urb *dto_urb;            /* for data output. */
        struct list_head list_node;     /* for rpipe->seg_list */
        struct wa_xfer *xfer;           /* out xfer */
        u8 index;                       /* which segment we are */
        int isoc_frame_count;   /* number of isoc frames in this segment. */
        int isoc_frame_offset;  /* starting frame offset in the xfer URB. */
        /* Isoc frame that the current transfer buffer corresponds to. */
        int isoc_frame_index;
        int isoc_size;  /* size of all isoc frames sent by this seg. */
        enum wa_seg_status status;
        ssize_t result;                 /* bytes xfered or error */
        struct wa_xfer_hdr xfer_hdr;
};

static inline void wa_seg_init(struct wa_seg *seg)
{
        usb_init_urb(&seg->tr_urb);

        /* usb_init_urb() zeroed tr_urb; clear the rest of the struct. */
        memset(((void *)seg) + sizeof(seg->tr_urb), 0,
                sizeof(*seg) - sizeof(seg->tr_urb));
}

/*
 * Protected by xfer->lock
 */
struct wa_xfer {
        struct kref refcnt;
        struct list_head list_node;
        spinlock_t lock;
        u32 id;

        struct wahc *wa;                /* Wire adapter we are plugged to */
        struct usb_host_endpoint *ep;
        struct urb *urb;                /* URB we are transferring for */
        struct wa_seg **seg;            /* transfer segments */
        u8 segs, segs_submitted, segs_done;
        unsigned is_inbound:1;
        unsigned is_dma:1;
        size_t seg_size;
        int result;

        gfp_t gfp;                      /* allocation mask */

        struct wusb_dev *wusb_dev;      /* for activity timestamps */
};

static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
        struct wa_seg *seg, int curr_iso_frame);
static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
                int starting_index, enum wa_seg_status status);

static inline void wa_xfer_init(struct wa_xfer *xfer)
{
        kref_init(&xfer->refcnt);
        INIT_LIST_HEAD(&xfer->list_node);
        spin_lock_init(&xfer->lock);
}

/*
 * Destroy a transfer structure
 *
 * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
 */
static void wa_xfer_destroy(struct kref *_xfer)
{
        struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
        if (xfer->seg) {
                unsigned cnt;
                for (cnt = 0; cnt < xfer->segs; cnt++) {
                        struct wa_seg *seg = xfer->seg[cnt];
                        if (seg) {
                                usb_free_urb(seg->isoc_pack_desc_urb);
                                if (seg->dto_urb) {
                                        kfree(seg->dto_urb->sg);
                                        usb_free_urb(seg->dto_urb);
                                }
                                usb_free_urb(&seg->tr_urb);
                        }
                }
                kfree(xfer->seg);
        }
        kfree(xfer);
}

static void wa_xfer_get(struct wa_xfer *xfer)
{
        kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
        kref_put(&xfer->refcnt, wa_xfer_destroy);
}
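
/*
 * Reference counting convention used below (a sketch, inferred from the
 * call sites rather than stated anywhere in this file):
 *
 *   - one reference is held by the submitter until wa_xfer_giveback();
 *   - every URB submitted on behalf of a segment (tr_urb, the isoc
 *     packet descriptor URB and the DTO data URB) takes its own
 *     reference, dropped at the tail of its completion callback;
 *   - lookups via wa_xfer_get_by_id() take a reference that the caller
 *     must drop with wa_xfer_put().
 */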

/*
 * Try to get exclusive access to the DTO endpoint resource.  Return true
 * if successful.
 */
static inline int __wa_dto_try_get(struct wahc *wa)
{
        return (test_and_set_bit(0, &wa->dto_in_use) == 0);
}

/* Release the DTO endpoint resource. */
static inline void __wa_dto_put(struct wahc *wa)
{
        clear_bit_unlock(0, &wa->dto_in_use);
}

/* Service RPIPEs that are waiting on the DTO resource. */
static void wa_check_for_delayed_rpipes(struct wahc *wa)
{
        unsigned long flags;
        int dto_waiting = 0;
        struct wa_rpipe *rpipe;

        spin_lock_irqsave(&wa->rpipe_lock, flags);
        while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
                rpipe = list_first_entry(&wa->rpipe_delayed_list,
                                struct wa_rpipe, list_node);
                __wa_xfer_delayed_run(rpipe, &dto_waiting);
                /* remove this RPIPE from the list if it is not waiting. */
                if (!dto_waiting) {
                        pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
                                __func__,
                                le16_to_cpu(rpipe->descr.wRPipeIndex));
                        list_del_init(&rpipe->list_node);
                }
        }
        spin_unlock_irqrestore(&wa->rpipe_lock, flags);
}

/* add this RPIPE to the end of the delayed RPIPE list. */
static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
{
        unsigned long flags;

        spin_lock_irqsave(&wa->rpipe_lock, flags);
        /* add rpipe to the list if it is not already on it. */
        if (list_empty(&rpipe->list_node)) {
                pr_debug("%s: adding RPIPE %d to the delayed list.\n",
                        __func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
                list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
        }
        spin_unlock_irqrestore(&wa->rpipe_lock, flags);
}
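
/*
 * The DTO endpoint is a single shared resource: only one RPIPE may be
 * writing transfer requests/data at a time.  The pattern used by the
 * submit paths below is roughly (illustrative only):
 *
 *	if (__wa_dto_try_get(wa)) {
 *		... submit segment(s) ...
 *		__wa_dto_put(wa);
 *		wa_check_for_delayed_rpipes(wa);	// wake any waiters
 *	} else {
 *		wa_add_delayed_rpipe(wa, rpipe);	// retry later
 *	}
 */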

/*
 * Remove the xfer from the wire adapter's bookkeeping and hand the URB
 * back to the WUSB host stack, dropping the references taken for the
 * transfer.
 *
 * xfer is referenced; xfer->lock has to be unlocked.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
        unsigned long flags;

        spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
        list_del_init(&xfer->list_node);
        usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);
        spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
        /* FIXME: segmentation broken -- kills DWA */
        wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
        wa_put(xfer->wa);
        wa_xfer_put(xfer);
}

/*
 * The transfer is done: release the wusb_dev and RPIPE references and
 * give the URB back.
 *
 * xfer is referenced; xfer->lock has to be unlocked.
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
        if (xfer->wusb_dev)
                wusb_dev_put(xfer->wusb_dev);
        rpipe_put(xfer->ep->hcpriv);
        wa_xfer_giveback(xfer);
}

/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number to avoid clashes.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
        xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}

/* Return the xfer's ID. */
static inline u32 wa_xfer_id(struct wa_xfer *xfer)
{
        return xfer->id;
}

/* Return the xfer's ID in little-endian. */
static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
{
        return cpu_to_le32(xfer->id);
}

/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
        struct device *dev = &xfer->wa->usb_iface->dev;
        unsigned result, cnt;
        struct wa_seg *seg;
        struct urb *urb = xfer->urb;
        unsigned found_short = 0;

        result = xfer->segs_done == xfer->segs_submitted;
        if (result == 0)
                goto out;
        urb->actual_length = 0;
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                seg = xfer->seg[cnt];
                switch (seg->status) {
                case WA_SEG_DONE:
                        if (found_short && seg->result > 0) {
                                dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
                                        xfer, wa_xfer_id(xfer), cnt,
                                        seg->result);
                                urb->status = -EINVAL;
                                goto out;
                        }
                        urb->actual_length += seg->result;
                        if (!(usb_pipeisoc(xfer->urb->pipe))
                                && seg->result < xfer->seg_size
                                && cnt != xfer->segs-1)
                                found_short = 1;
                        dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
                                "result %zu urb->actual_length %d\n",
                                xfer, wa_xfer_id(xfer), seg->index, found_short,
                                seg->result, urb->actual_length);
                        break;
                case WA_SEG_ERROR:
                        xfer->result = seg->result;
                        dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
                                xfer, wa_xfer_id(xfer), seg->index, seg->result,
                                seg->result);
                        goto out;
                case WA_SEG_ABORTED:
                        xfer->result = seg->result;
                        dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
                                xfer, wa_xfer_id(xfer), seg->index, seg->result,
                                seg->result);
                        goto out;
                default:
                        dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
                                 xfer, wa_xfer_id(xfer), cnt, seg->status);
                        xfer->result = -EINVAL;
                        goto out;
                }
        }
        xfer->result = 0;
out:
        return result;
}
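
/*
 * Example of the short-read rule above: for a 3-segment bulk IN
 * transfer, if segment 0 returns seg_size bytes, segment 1 returns
 * fewer than seg_size bytes and segment 2 then returns any data, the
 * transfer is failed with -EINVAL ("bad short segments").  A short
 * segment is only legal when it is the final segment or every later
 * segment is empty.
 */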

/*
 * Mark the given segment as done.  Return true if this completes the xfer.
 * This should only be called for segs that have been submitted to an RPIPE.
 * Delayed segs are not marked as submitted so they do not need to be marked
 * done when cleaning up.
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
        struct wa_seg *seg, enum wa_seg_status status)
{
        seg->status = status;
        xfer->segs_done++;

        /* check for done. */
        return __wa_xfer_is_done(xfer);
}

/*
 * Search the wire adapter's transfer list for a transfer with the given
 * ID and take a reference on it.
 *
 * @returns NULL if not found.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
        unsigned long flags;
        struct wa_xfer *xfer_itr;
        spin_lock_irqsave(&wa->xfer_list_lock, flags);
        list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
                if (id == xfer_itr->id) {
                        wa_xfer_get(xfer_itr);
                        goto out;
                }
        }
        xfer_itr = NULL;
out:
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
        return xfer_itr;
}

struct wa_xfer_abort_buffer {
        struct urb urb;
        struct wahc *wa;
        struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
        struct wa_xfer_abort_buffer *b = urb->context;
        struct wahc *wa = b->wa;

        /*
         * If the abort request URB failed, then the HWA did not get the
         * abort command.  Forcibly clean up the xfer without waiting for
         * a Transfer Result from the HWA.
         */
        if (urb->status < 0) {
                struct wa_xfer *xfer;
                struct device *dev = &wa->usb_iface->dev;

                xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
                dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
                        __func__, urb->status);
                if (xfer) {
                        unsigned long flags;
                        int done, seg_index = 0;
                        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

                        dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
                                __func__, xfer, wa_xfer_id(xfer));
                        spin_lock_irqsave(&xfer->lock, flags);
                        /* skip done segs. */
                        while (seg_index < xfer->segs) {
                                struct wa_seg *seg = xfer->seg[seg_index];

                                if ((seg->status == WA_SEG_DONE) ||
                                        (seg->status == WA_SEG_ERROR)) {
                                        ++seg_index;
                                } else {
                                        break;
                                }
                        }
                        /* mark remaining segs as aborted. */
                        wa_complete_remaining_xfer_segs(xfer, seg_index,
                                WA_SEG_ABORTED);
                        done = __wa_xfer_is_done(xfer);
                        spin_unlock_irqrestore(&xfer->lock, flags);
                        if (done)
                                wa_xfer_completion(xfer);
                        wa_xfer_delayed_run(rpipe);
                        wa_xfer_put(xfer);
                } else {
                        dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
                                 __func__, le32_to_cpu(b->cmd.dwTransferID));
                }
        }

        wa_put(wa);     /* taken in __wa_xfer_abort */
        usb_put_urb(&b->urb);
}

/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB.  Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfree()d.
 *
 * We'll get an 'aborted transaction' xfer result on DTI, which will
 * be politely ignored.
 */
static int __wa_xfer_abort(struct wa_xfer *xfer)
{
        int result = -ENOMEM;
        struct device *dev = &xfer->wa->usb_iface->dev;
        struct wa_xfer_abort_buffer *b;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

        b = kmalloc(sizeof(*b), GFP_ATOMIC);
        if (b == NULL)
                goto error_kmalloc;
        b->cmd.bLength = sizeof(b->cmd);
        b->cmd.bRequestType = WA_XFER_ABORT;
        b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
        b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
        b->wa = wa_get(xfer->wa);

        usb_init_urb(&b->urb);
        usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
                usb_sndbulkpipe(xfer->wa->usb_dev,
                                xfer->wa->dto_epd->bEndpointAddress),
                &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
        result = usb_submit_urb(&b->urb, GFP_ATOMIC);
        if (result < 0)
                goto error_submit;
        return result;                          /* callback frees! */

error_submit:
        wa_put(xfer->wa);
        if (printk_ratelimit())
                dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
                        xfer, result);
        kfree(b);
error_kmalloc:
        return result;
}

/*
 * Calculate the number of isoc frames starting from isoc_frame_offset
 * that will fit in a transfer segment.
 */
static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
        int isoc_frame_offset, int *total_size)
{
        int segment_size = 0, frame_count = 0;
        int index = isoc_frame_offset;
        struct usb_iso_packet_descriptor *iso_frame_desc =
                xfer->urb->iso_frame_desc;

        while ((index < xfer->urb->number_of_packets)
                && ((segment_size + iso_frame_desc[index].length)
                                <= xfer->seg_size)) {
                /*
                 * For Alereon HWA devices, only include an isoc frame in an
                 * out segment if it is physically contiguous with the
                 * previous frame.  This is required because those devices
                 * expect the isoc frames to be sent as a single USB
                 * transaction as opposed to one transaction per frame with
                 * standard HWA.
                 */
                if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
                        && (xfer->is_inbound == 0)
                        && (index > isoc_frame_offset)
                        && ((iso_frame_desc[index - 1].offset +
                                iso_frame_desc[index - 1].length) !=
                                iso_frame_desc[index].offset))
                        break;

                /* this frame fits. count it. */
                ++frame_count;
                segment_size += iso_frame_desc[index].length;

                /* move to the next isoc frame. */
                ++index;
        }

        *total_size = segment_size;
        return frame_count;
}
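
/*
 * Worked example (illustrative numbers): with xfer->seg_size = 3072 and
 * four isoc frames of 1024 bytes each, the first call returns 3 with
 * *total_size = 3072; called again at offset 3 it returns 1, so the
 * transfer needs two segments.  With the Alereon concat quirk on an OUT
 * pipe, a frame that is not contiguous in the buffer with its
 * predecessor also starts a new segment.
 */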

/*
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
                                     enum wa_xfer_type *pxfer_type)
{
        ssize_t result;
        struct device *dev = &xfer->wa->usb_iface->dev;
        size_t maxpktsize;
        struct urb *urb = xfer->urb;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

        switch (rpipe->descr.bmAttribute & 0x3) {
        case USB_ENDPOINT_XFER_CONTROL:
                *pxfer_type = WA_XFER_TYPE_CTL;
                result = sizeof(struct wa_xfer_ctl);
                break;
        case USB_ENDPOINT_XFER_INT:
        case USB_ENDPOINT_XFER_BULK:
                *pxfer_type = WA_XFER_TYPE_BI;
                result = sizeof(struct wa_xfer_bi);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                *pxfer_type = WA_XFER_TYPE_ISO;
                result = sizeof(struct wa_xfer_hwaiso);
                break;
        default:
                /* never happens */
                BUG();
                result = -EINVAL;       /* shut gcc up */
        }
        xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
        xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;

        maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
        xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
                * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
        /* Compute the segment size and make sure it is a multiple of
         * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
         * a check (FIXME) */
        if (xfer->seg_size < maxpktsize) {
                dev_err(dev,
                        "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
                        xfer->seg_size, maxpktsize);
                result = -EINVAL;
                goto error;
        }
        xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
        if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
                int index = 0;

                xfer->segs = 0;
                /*
                 * loop over urb->number_of_packets to determine how many
                 * xfer segments will be needed to send the isoc frames.
                 */
                while (index < urb->number_of_packets) {
                        int seg_size; /* don't care. */
                        index += __wa_seg_calculate_isoc_frame_count(xfer,
                                        index, &seg_size);
                        ++xfer->segs;
                }
        } else {
                xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
                                                xfer->seg_size);
                if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
                        xfer->segs = 1;
        }

        if (xfer->segs > WA_SEGS_MAX) {
                dev_err(dev, "BUG? oops, number of segments %zu bigger than %d\n",
                        (urb->transfer_buffer_length/xfer->seg_size),
                        WA_SEGS_MAX);
                result = -EINVAL;
                goto error;
        }
error:
        return result;
}
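
/*
 * seg_size arithmetic above, with illustrative numbers: wBlocks = 20 and
 * bRPipeBlockSize = 8 give 20 * 2^7 = 2560 bytes; rounded down to a
 * multiple of a 1024-byte wMaxPacketSize this yields seg_size = 2048, so
 * a 10000-byte bulk URB would be split into DIV_ROUND_UP(10000, 2048) =
 * 5 segments.
 */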

static void __wa_setup_isoc_packet_descr(
                struct wa_xfer_packet_info_hwaiso *packet_desc,
                struct wa_xfer *xfer,
                struct wa_seg *seg) {
        struct usb_iso_packet_descriptor *iso_frame_desc =
                xfer->urb->iso_frame_desc;
        int frame_index;

        /* populate isoc packet descriptor. */
        packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
        packet_desc->wLength = cpu_to_le16(struct_size(packet_desc,
                                           PacketLength,
                                           seg->isoc_frame_count));
        for (frame_index = 0; frame_index < seg->isoc_frame_count;
                ++frame_index) {
                int offset_index = frame_index + seg->isoc_frame_offset;
                packet_desc->PacketLength[frame_index] =
                        cpu_to_le16(iso_frame_desc[offset_index].length);
        }
}

/*
 * Fill in the common request header and xfer-type specific data.
 */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
                                 struct wa_xfer_hdr *xfer_hdr0,
                                 enum wa_xfer_type xfer_type,
                                 size_t xfer_hdr_size)
{
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
        struct wa_seg *seg = xfer->seg[0];

        xfer_hdr0 = &seg->xfer_hdr;
        xfer_hdr0->bLength = xfer_hdr_size;
        xfer_hdr0->bRequestType = xfer_type;
        xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
        xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
        xfer_hdr0->bTransferSegment = 0;
        switch (xfer_type) {
        case WA_XFER_TYPE_CTL: {
                struct wa_xfer_ctl *xfer_ctl =
                        container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
                xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
                memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
                       sizeof(xfer_ctl->baSetupData));
                break;
        }
        case WA_XFER_TYPE_BI:
                break;
        case WA_XFER_TYPE_ISO: {
                struct wa_xfer_hwaiso *xfer_iso =
                        container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
                struct wa_xfer_packet_info_hwaiso *packet_desc =
                        ((void *)xfer_iso) + xfer_hdr_size;

                /* populate the isoc section of the transfer request. */
                xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
                /* populate isoc packet descriptor. */
                __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
                break;
        }
        default:
                BUG();
        };
}
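
/*
 * Resulting request layout for an isoc segment, as built above and in
 * __wa_xfer_setup_segs() (sketch):
 *
 *	+---------------------------+  <- seg->xfer_hdr
 *	| struct wa_xfer_hwaiso     |     (xfer_hdr_size bytes)
 *	+---------------------------+
 *	| wa_xfer_packet_info_      |
 *	|   hwaiso: wLength,        |
 *	|   bPacketType,            |
 *	|   PacketLength[nframes]   |
 *	+---------------------------+
 */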

/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_tr_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_tr_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned long flags;
        unsigned rpipe_ready = 0;
        int data_send_done = 1, release_dto = 0, holding_dto = 0;
        u8 done = 0;
        int result;

        /* free the sg if it was used. */
        kfree(urb->sg);
        urb->sg = NULL;

        spin_lock_irqsave(&xfer->lock, flags);
        wa = xfer->wa;
        dev = &wa->usb_iface->dev;
        if (usb_pipeisoc(xfer->urb->pipe)) {
                /* Alereon HWA sends all isoc frames in a single transfer. */
                if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
                        seg->isoc_frame_index += seg->isoc_frame_count;
                else
                        seg->isoc_frame_index += 1;
                if (seg->isoc_frame_index < seg->isoc_frame_count) {
                        data_send_done = 0;
                        holding_dto = 1; /* checked in error cases. */
                        /*
                         * if this is the last isoc frame of the segment, we
                         * can release DTO after sending this frame.
                         */
                        if ((seg->isoc_frame_index + 1) >=
                                seg->isoc_frame_count)
                                release_dto = 1;
                }
                dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
                        wa_xfer_id(xfer), seg->index, seg->isoc_frame_index,
                        holding_dto, release_dto);
        }
        spin_unlock_irqrestore(&xfer->lock, flags);

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                seg->result += urb->actual_length;
                if (data_send_done) {
                        dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
                                wa_xfer_id(xfer), seg->index, seg->result);
                        if (seg->status < WA_SEG_PENDING)
                                seg->status = WA_SEG_PENDING;
                } else {
                        /* more isoc frames to send for this segment. */
                        /*
                         * Populate the dto URB with the next isoc frame
                         * buffer and resubmit it.
                         */
                        __wa_populate_dto_urb_isoc(xfer, seg,
                                seg->isoc_frame_offset + seg->isoc_frame_index);

                        /* take a ref for the resubmitted dto URB. */
                        wa_xfer_get(xfer);
                        result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
                        if (result < 0) {
                                dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
                                       wa_xfer_id(xfer), seg->index, result);
                                spin_unlock_irqrestore(&xfer->lock, flags);
                                goto error_dto_submit;
                        }
                }
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (release_dto) {
                        __wa_dto_put(wa);
                        wa_check_for_delayed_rpipes(wa);
                }
                break;
        case -ECONNRESET:       /* unlink */
        case -ENOENT:           /* as it was done by the who unlinked us */
                if (holding_dto) {
                        __wa_dto_put(wa);
                        wa_check_for_delayed_rpipes(wa);
                }
                break;
        default:                /* Other errors ... */
                dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
                        wa_xfer_id(xfer), seg->index, urb->status);
                goto error_default;
        }

        /* taken when this URB was submitted. */
        wa_xfer_put(xfer);
        return;

error_dto_submit:
        /* taken on resubmit attempt. */
        wa_xfer_put(xfer);
error_default:
        spin_lock_irqsave(&xfer->lock, flags);
        rpipe = xfer->ep->hcpriv;
        if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                    EDC_ERROR_TIMEFRAME)) {
                dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
                wa_reset_all(wa);
        }
        if (seg->status != WA_SEG_ERROR) {
                seg->result = urb->status;
                __wa_xfer_abort(xfer);
                rpipe_ready = rpipe_avail_inc(rpipe);
                done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
        }
        spin_unlock_irqrestore(&xfer->lock, flags);
        if (holding_dto) {
                __wa_dto_put(wa);
                wa_check_for_delayed_rpipes(wa);
        }
        if (done)
                wa_xfer_completion(xfer);
        if (rpipe_ready)
                wa_xfer_delayed_run(rpipe);
        /* taken when this URB was submitted. */
        wa_xfer_put(xfer);
}

/*
 * Callback for the isoc packet descriptor phase of the segment request
 *
 * Check wa_seg_tr_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this phase has succeeded,
 * wa_seg_tr_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_iso_pack_desc_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned long flags;
        unsigned rpipe_ready = 0;
        u8 done = 0;

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
                        wa_xfer_id(xfer), seg->index);
                if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
                        seg->status = WA_SEG_PENDING;
                spin_unlock_irqrestore(&xfer->lock, flags);
                break;
        case -ECONNRESET:       /* unlink */
        case -ENOENT:           /* as it was done by the who unlinked us */
                break;
        default:                /* Other errors ... */
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
                                wa_xfer_id(xfer), seg->index, urb->status);
                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
                        wa_reset_all(wa);
                }
                if (seg->status != WA_SEG_ERROR) {
                        usb_unlink_urb(seg->dto_urb);
                        seg->result = urb->status;
                        __wa_xfer_abort(xfer);
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        done = __wa_xfer_mark_seg_as_done(xfer, seg,
                                        WA_SEG_ERROR);
                }
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
        /* taken when this URB was submitted. */
        wa_xfer_put(xfer);
}

/*
 * Callback for the segment request
 *
 * If successful transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback, so it might happen that we are already in another state.
 * As well, we don't set it if the transfer is not inbound, as in that
 * case, wa_seg_dto_cb will do it when the OUT data phase finishes.
 */
static void wa_seg_tr_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned long flags;
        unsigned rpipe_ready;
        u8 done = 0;

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
                        xfer, wa_xfer_id(xfer), seg->index);
                if (xfer->is_inbound &&
                        seg->status < WA_SEG_PENDING &&
                        !(usb_pipeisoc(xfer->urb->pipe)))
                        seg->status = WA_SEG_PENDING;
                spin_unlock_irqrestore(&xfer->lock, flags);
                break;
        case -ECONNRESET:       /* unlink */
        case -ENOENT:           /* as it was done by the who unlinked us */
                break;
        default:                /* Other errors ... */
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                if (printk_ratelimit())
                        dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
                                xfer, wa_xfer_id(xfer), seg->index,
                                urb->status);
                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
                        wa_reset_all(wa);
                }
                usb_unlink_urb(seg->isoc_pack_desc_urb);
                usb_unlink_urb(seg->dto_urb);
                seg->result = urb->status;
                __wa_xfer_abort(xfer);
                rpipe_ready = rpipe_avail_inc(rpipe);
                done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
        /* taken when this URB was submitted. */
        wa_xfer_put(xfer);
}

/*
 * Allocate an SG list to store bytes_to_transfer bytes and copy the
 * subset of the in_sg that matches the buffer subset
 * we are about to transfer.
 */
static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
        const unsigned int bytes_transferred,
        const unsigned int bytes_to_transfer, int *out_num_sgs)
{
        struct scatterlist *out_sg;
        unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
                nents;
        struct scatterlist *current_xfer_sg = in_sg;
        struct scatterlist *current_seg_sg, *last_seg_sg;

        /* skip previously transferred pages. */
        while ((current_xfer_sg) &&
                        (bytes_processed < bytes_transferred)) {
                bytes_processed += current_xfer_sg->length;

                /* advance the sg if current segment starts on or past the
                        next page. */
                if (bytes_processed <= bytes_transferred)
                        current_xfer_sg = sg_next(current_xfer_sg);
        }

        /* the data for the current segment starts in current_xfer_sg.
                calculate the offset. */
        if (bytes_processed > bytes_transferred) {
                offset_into_current_page_data = current_xfer_sg->length -
                        (bytes_processed - bytes_transferred);
        }

        /* calculate the number of pages needed by this segment. */
        nents = DIV_ROUND_UP((bytes_to_transfer +
                offset_into_current_page_data +
                current_xfer_sg->offset),
                PAGE_SIZE);

        out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
        if (out_sg) {
                sg_init_table(out_sg, nents);

                /* copy the portion of the incoming SG that correlates to the
                 * data to be transferred by this segment to the segment SG. */
                last_seg_sg = current_seg_sg = out_sg;
                bytes_processed = 0;

                /* reset nents and calculate the actual number of sg entries
                        needed. */
                nents = 0;
                while ((bytes_processed < bytes_to_transfer) &&
                                current_seg_sg && current_xfer_sg) {
                        unsigned int page_len = min((current_xfer_sg->length -
                                offset_into_current_page_data),
                                (bytes_to_transfer - bytes_processed));

                        sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
                                page_len,
                                current_xfer_sg->offset +
                                offset_into_current_page_data);

                        bytes_processed += page_len;

                        last_seg_sg = current_seg_sg;
                        current_seg_sg = sg_next(current_seg_sg);
                        current_xfer_sg = sg_next(current_xfer_sg);

                        /* only the first page may require additional offset. */
                        offset_into_current_page_data = 0;
                        nents++;
                }

                /* update num_sgs and terminate the list since we may have
                 * concatenated pages. */
                sg_mark_end(last_seg_sg);
                *out_num_sgs = nents;
        }

        return out_sg;
}
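
/*
 * Illustrative call (hypothetical values): for a 16 KiB URB already
 * 4096 bytes in, wa_xfer_create_subset_sg(urb->sg, 4096, 2048, &nsgs)
 * walks past the first page of the original list, then builds a new
 * list whose first entry starts at the matching in-page offset and
 * whose total length is 2048 bytes.  The caller owns the returned list
 * and kfree()s it (see wa_seg_dto_cb() and wa_xfer_destroy()).
 */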

/*
 * Populate the given urb based on the current isoc transfer state.
 */
static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
        struct wa_seg *seg, int curr_iso_frame)
{
        seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
        seg->dto_urb->sg = NULL;
        seg->dto_urb->num_sgs = 0;
        /* dto urb buffer address pulled from iso_frame_desc. */
        seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
                xfer->urb->iso_frame_desc[curr_iso_frame].offset;
        /* The Alereon HWA sends a single URB with all isoc segs. */
        if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
                seg->dto_urb->transfer_buffer_length = seg->isoc_size;
        else
                seg->dto_urb->transfer_buffer_length =
                        xfer->urb->iso_frame_desc[curr_iso_frame].length;
}

/*
 * Populate the given urb based on the current transfer state.
 */
static int __wa_populate_dto_urb(struct wa_xfer *xfer,
        struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
{
        int result = 0;

        if (xfer->is_dma) {
                seg->dto_urb->transfer_dma =
                        xfer->urb->transfer_dma + buf_itr_offset;
                seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
                seg->dto_urb->sg = NULL;
                seg->dto_urb->num_sgs = 0;
        } else {
                /* do buffer or SG processing. */
                seg->dto_urb->transfer_flags &=
                        ~URB_NO_TRANSFER_DMA_MAP;
                /* this should always be 0 before a resubmit. */
                seg->dto_urb->num_mapped_sgs = 0;

                if (xfer->urb->transfer_buffer) {
                        seg->dto_urb->transfer_buffer =
                                xfer->urb->transfer_buffer +
                                buf_itr_offset;
                        seg->dto_urb->sg = NULL;
                        seg->dto_urb->num_sgs = 0;
                } else {
                        seg->dto_urb->transfer_buffer = NULL;

                        /*
                         * allocate an SG list to store seg_size bytes
                         * and copy the subset of the xfer->urb->sg that
                         * matches the buffer subset we are about to
                         * transfer.
                         */
                        seg->dto_urb->sg = wa_xfer_create_subset_sg(
                                xfer->urb->sg,
                                buf_itr_offset, buf_itr_size,
                                &(seg->dto_urb->num_sgs));
                        if (!(seg->dto_urb->sg))
                                result = -ENOMEM;
                }
        }
        seg->dto_urb->transfer_buffer_length = buf_itr_size;

        return result;
}

/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero.
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
        int result, cnt, isoc_frame_offset = 0;
        size_t alloc_size = sizeof(*xfer->seg[0])
                - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
        struct usb_device *usb_dev = xfer->wa->usb_dev;
        const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
        struct wa_seg *seg;
        size_t buf_itr, buf_size, buf_itr_size;

        result = -ENOMEM;
        xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
        if (xfer->seg == NULL)
                goto error_segs_kzalloc;
        buf_itr = 0;
        buf_size = xfer->urb->transfer_buffer_length;
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                size_t iso_pkt_descr_size = 0;
                int seg_isoc_frame_count = 0, seg_isoc_size = 0;

                /*
                 * Adjust the size of the segment object to contain space for
                 * the isoc packet descriptor buffer.
                 */
                if (usb_pipeisoc(xfer->urb->pipe)) {
                        seg_isoc_frame_count =
                                __wa_seg_calculate_isoc_frame_count(xfer,
                                        isoc_frame_offset, &seg_isoc_size);

                        iso_pkt_descr_size =
                                sizeof(struct wa_xfer_packet_info_hwaiso) +
                                (seg_isoc_frame_count * sizeof(__le16));
                }
                result = -ENOMEM;
                seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
                                                GFP_ATOMIC);
                if (seg == NULL)
                        goto error_seg_kmalloc;
                wa_seg_init(seg);
                seg->xfer = xfer;
                seg->index = cnt;
                usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
                                  usb_sndbulkpipe(usb_dev,
                                                  dto_epd->bEndpointAddress),
                                  &seg->xfer_hdr, xfer_hdr_size,
                                  wa_seg_tr_cb, seg);
                buf_itr_size = min(buf_size, xfer->seg_size);

                if (usb_pipeisoc(xfer->urb->pipe)) {
                        seg->isoc_frame_count = seg_isoc_frame_count;
                        seg->isoc_frame_offset = isoc_frame_offset;
                        seg->isoc_size = seg_isoc_size;
                        /* iso packet descriptor. */
                        seg->isoc_pack_desc_urb =
                                        usb_alloc_urb(0, GFP_ATOMIC);
                        if (seg->isoc_pack_desc_urb == NULL)
                                goto error_iso_pack_desc_alloc;
                        /*
                         * The buffer for the isoc packet descriptor starts
                         * after the transfer request header in the
                         * segment object memory buffer.
                         */
                        usb_fill_bulk_urb(
                                seg->isoc_pack_desc_urb, usb_dev,
                                usb_sndbulkpipe(usb_dev,
                                        dto_epd->bEndpointAddress),
                                (void *)(&seg->xfer_hdr) +
                                        xfer_hdr_size,
                                iso_pkt_descr_size,
                                wa_seg_iso_pack_desc_cb, seg);

                        /* adjust starting frame offset for next seg. */
                        isoc_frame_offset += seg_isoc_frame_count;
                }

                if (xfer->is_inbound == 0 && buf_size > 0) {
                        /* outbound data. */
                        seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
                        if (seg->dto_urb == NULL)
                                goto error_dto_alloc;
                        usb_fill_bulk_urb(
                                seg->dto_urb, usb_dev,
                                usb_sndbulkpipe(usb_dev,
                                                dto_epd->bEndpointAddress),
                                NULL, 0, wa_seg_dto_cb, seg);

                        if (usb_pipeisoc(xfer->urb->pipe)) {
                                /*
                                 * Fill in the xfer buffer information for the
                                 * first isoc frame.  Subsequent frames in this
                                 * segment will be filled in and sent from the
                                 * DTO completion routine, if needed.
                                 */
                                __wa_populate_dto_urb_isoc(xfer, seg,
                                        seg->isoc_frame_offset);
                        } else {
                                /* fill in the xfer buffer information. */
                                result = __wa_populate_dto_urb(xfer, seg,
                                                        buf_itr, buf_itr_size);
                                if (result < 0)
                                        goto error_seg_outbound_populate;

                                buf_itr += buf_itr_size;
                                buf_size -= buf_itr_size;
                        }
                }
                seg->status = WA_SEG_READY;
        }
        return 0;

        /*
         * Free the memory for the current segment which failed to init.
         * Use the fact that cnt is left at where it failed.  The remaining
         * segments will be cleaned up by wa_xfer_destroy.
         */
error_seg_outbound_populate:
        usb_free_urb(xfer->seg[cnt]->dto_urb);
error_dto_alloc:
        usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
error_iso_pack_desc_alloc:
        kfree(xfer->seg[cnt]);
        xfer->seg[cnt] = NULL;
error_seg_kmalloc:
error_segs_kzalloc:
        return result;
}
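
/*
 * Memory layout of one segment allocation made above (sketch):
 *
 *	+----------------------------+  <- kmalloc(alloc_size +
 *	| struct wa_seg              |             iso_pkt_descr_size)
 *	|   ... fields ...           |
 *	|   struct wa_xfer_hdr       |  <- replaced by xfer_hdr_size bytes
 *	+----------------------------+     of the type-specific header
 *	| isoc packet descriptor     |  <- iso_pkt_descr_size bytes
 *	+----------------------------+     (isoc transfers only)
 *
 * alloc_size = sizeof(*seg) - sizeof(seg->xfer_hdr) + xfer_hdr_size, so
 * the type-specific header overlays the tail of struct wa_seg.
 */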

/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer in a list of segments, each one has a
 * structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function, no
 *        need to do two for loops when we could run everything in a
 *        single one
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
        int result;
        struct device *dev = &xfer->wa->usb_iface->dev;
        enum wa_xfer_type xfer_type = 0; /* shut up GCC */
        size_t xfer_hdr_size, cnt, transfer_size;
        struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

        result = __wa_xfer_setup_sizes(xfer, &xfer_type);
        if (result < 0)
                goto error_setup_sizes;
        xfer_hdr_size = result;
        result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
        if (result < 0) {
                dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
                        xfer, xfer->segs, result);
                goto error_setup_segs;
        }
        /* Fill the first header */
        xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
        wa_xfer_id_init(xfer);
        __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

        /* Fill remaining headers */
        xfer_hdr = xfer_hdr0;
        if (xfer_type == WA_XFER_TYPE_ISO) {
                xfer_hdr0->dwTransferLength =
                        cpu_to_le32(xfer->seg[0]->isoc_size);
                for (cnt = 1; cnt < xfer->segs; cnt++) {
                        struct wa_xfer_packet_info_hwaiso *packet_desc;
                        struct wa_seg *seg = xfer->seg[cnt];
                        struct wa_xfer_hwaiso *xfer_iso;

                        xfer_hdr = &seg->xfer_hdr;
                        xfer_iso = container_of(xfer_hdr,
                                                struct wa_xfer_hwaiso, hdr);
                        packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
                        /*
                         * Copy values from the 0th header. Segment specific
                         * values are set below.
                         */
                        memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
                        xfer_hdr->bTransferSegment = cnt;
                        xfer_hdr->dwTransferLength =
                                cpu_to_le32(seg->isoc_size);
                        xfer_iso->dwNumOfPackets =
                                        cpu_to_le32(seg->isoc_frame_count);
                        __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
                        seg->status = WA_SEG_READY;
                }
        } else {
                transfer_size = urb->transfer_buffer_length;
                xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
                        cpu_to_le32(xfer->seg_size) :
                        cpu_to_le32(transfer_size);
                transfer_size -= xfer->seg_size;
                for (cnt = 1; cnt < xfer->segs; cnt++) {
                        xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
                        memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
                        xfer_hdr->bTransferSegment = cnt;
                        xfer_hdr->dwTransferLength =
                                transfer_size > xfer->seg_size ?
                                        cpu_to_le32(xfer->seg_size)
                                        : cpu_to_le32(transfer_size);
                        xfer->seg[cnt]->status = WA_SEG_READY;
                        transfer_size -= xfer->seg_size;
                }
        }
        xfer_hdr->bTransferSegment |= 0x80;     /* this is the last segment */
        result = 0;
error_setup_segs:
error_setup_sizes:
        return result;
}

/*
 * Submit the URBs that make up one segment: the transfer request, the
 * isoc packet descriptor (if any) and the OUT data (if any).
 *
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
                           struct wa_seg *seg, int *dto_done)
{
        int result;

        /* default to done unless we encounter a multi-frame isoc segment. */
        *dto_done = 1;

        /*
         * Take a ref for each segment urb so the xfer cannot disappear until
         * all of the callbacks run.
         */
        wa_xfer_get(xfer);
        /* submit the transfer request. */
        seg->status = WA_SEG_SUBMITTED;
        result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
        if (result < 0) {
                pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
                       __func__, xfer, seg->index, result);
                wa_xfer_put(xfer);
                goto error_tr_submit;
        }
        /* submit the isoc packet descriptor if present. */
        if (seg->isoc_pack_desc_urb) {
                wa_xfer_get(xfer);
                result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
                seg->isoc_frame_index = 0;
                if (result < 0) {
                        pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
                               __func__, xfer, seg->index, result);
                        wa_xfer_put(xfer);
                        goto error_iso_pack_desc_submit;
                }
        }
        /* submit the out data if this is an out request. */
        if (seg->dto_urb) {
                struct wahc *wa = xfer->wa;
                wa_xfer_get(xfer);
                result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
                if (result < 0) {
                        pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
                               __func__, xfer, seg->index, result);
                        wa_xfer_put(xfer);
                        goto error_dto_submit;
                }
                /*
                 * If this segment contains more than one isoc frame, hold
                 * onto the dto resource until we send all frames.
                 * Only applies to non-Alereon devices.
                 */
                if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
                        && (seg->isoc_frame_count > 1))
                        *dto_done = 0;
        }
        rpipe_avail_dec(rpipe);
        return 0;

error_dto_submit:
        usb_unlink_urb(seg->isoc_pack_desc_urb);
error_iso_pack_desc_submit:
        usb_unlink_urb(&seg->tr_urb);
error_tr_submit:
        seg->status = WA_SEG_ERROR;
        seg->result = result;
        *dto_done = 1;
        return result;
}
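
/*
 * Note on ordering (inferred from the code above): the transfer request
 * header (tr_urb), the isoc packet descriptor and the OUT data are
 * queued on the same bulk DTO pipe in that order, so the wire adapter
 * sees a well-formed request: header, optional packet descriptor, then
 * payload.  Each submit takes its own xfer reference; the matching puts
 * happen in the URB completion callbacks.
 */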

/*
 * Execute more queued request segments until the maximum concurrent allowed.
 * Returns the dto_done value from the last segment submitted (true if the
 * DTO resource was released).
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not viceversa.
 */
static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
{
        int result, dto_acquired = 0, dto_done = 0;
        struct device *dev = &rpipe->wa->usb_iface->dev;
        struct wa_seg *seg;
        struct wa_xfer *xfer;
        unsigned long flags;

        *dto_waiting = 0;

        spin_lock_irqsave(&rpipe->seg_lock, flags);
        while (atomic_read(&rpipe->segs_available) > 0
              && !list_empty(&rpipe->seg_list)
              && (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
                seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
                                 list_node);
                list_del(&seg->list_node);
                xfer = seg->xfer;
                /*
                 * Get a reference to the xfer in case the callbacks for the
                 * URBs submitted by __wa_seg_submit attempt to complete
                 * the xfer before this function completes.
                 */
                wa_xfer_get(xfer);
                result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
                /* release the dto resource if this RPIPE is done with it. */
                if (dto_done)
                        __wa_dto_put(rpipe->wa);
                dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
                        xfer, wa_xfer_id(xfer), seg->index,
                        atomic_read(&rpipe->segs_available), result);
                if (unlikely(result < 0)) {
                        int done;

                        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
                        spin_lock_irqsave(&xfer->lock, flags);
                        __wa_xfer_abort(xfer);
                        /*
                         * This seg was marked as submitted when it was put on
                         * the RPIPE seg_list.  Mark it done.
                         */
                        xfer->segs_done++;
                        done = __wa_xfer_is_done(xfer);
                        spin_unlock_irqrestore(&xfer->lock, flags);
                        if (done)
                                wa_xfer_completion(xfer);
                        spin_lock_irqsave(&rpipe->seg_lock, flags);
                }
                wa_xfer_put(xfer);
        }
        /*
         * Mark this RPIPE as waiting if dto was not acquired, there are
         * delayed segs and no active transfers to wake us up later.
         */
        if (!dto_acquired && !list_empty(&rpipe->seg_list)
                && (atomic_read(&rpipe->segs_available) ==
                        le16_to_cpu(rpipe->descr.wRequests)))
                *dto_waiting = 1;

        spin_unlock_irqrestore(&rpipe->seg_lock, flags);

        return dto_done;
}

static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
        int dto_waiting;
        int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);

        /*
         * If this RPIPE is waiting on the DTO resource, add it to the tail of
         * the waiting list.
         * Otherwise, if the WA DTO resource was acquired and released by
         * __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
         * DTO and failed during that time.  Check the delayed list and process
         * any RPIPEs waiting for DTO.
         */
        if (dto_waiting)
                wa_add_delayed_rpipe(rpipe->wa, rpipe);
        else if (dto_done)
                wa_check_for_delayed_rpipes(rpipe->wa);
}
1533 
1534 
1535 
1536 
1537 
1538 
1539 
1540 
1541 static int __wa_xfer_submit(struct wa_xfer *xfer)
1542 {
1543         int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
1544         struct wahc *wa = xfer->wa;
1545         struct device *dev = &wa->usb_iface->dev;
1546         unsigned cnt;
1547         struct wa_seg *seg;
1548         unsigned long flags;
1549         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
1550         size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
1551         u8 available;
1552         u8 empty;
1553 
1554         spin_lock_irqsave(&wa->xfer_list_lock, flags);
1555         list_add_tail(&xfer->list_node, &wa->xfer_list);
1556         spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1557 
1558         BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
1559         result = 0;
1560         spin_lock_irqsave(&rpipe->seg_lock, flags);
1561         for (cnt = 0; cnt < xfer->segs; cnt++) {
1562                 int delay_seg = 1;
1563 
1564                 available = atomic_read(&rpipe->segs_available);
1565                 empty = list_empty(&rpipe->seg_list);
1566                 seg = xfer->seg[cnt];
1567                 if (available && empty) {
1568                         
1569 
1570 
1571 
1572                         dto_acquired = __wa_dto_try_get(rpipe->wa);
1573                         if (dto_acquired) {
1574                                 delay_seg = 0;
1575                                 result = __wa_seg_submit(rpipe, xfer, seg,
1576                                                         &dto_done);
1577                                 dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
1578                                         xfer, wa_xfer_id(xfer), cnt, available,
1579                                         empty);
1580                                 if (dto_done)
1581                                         __wa_dto_put(rpipe->wa);
1582 
1583                                 if (result < 0) {
1584                                         __wa_xfer_abort(xfer);
1585                                         goto error_seg_submit;
1586                                 }
1587                         }
1588                 }
1589 
1590                 if (delay_seg) {
1591                         dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
1592                                 xfer, wa_xfer_id(xfer), cnt, available,  empty);
1593                         seg->status = WA_SEG_DELAYED;
1594                         list_add_tail(&seg->list_node, &rpipe->seg_list);
1595                 }
1596                 xfer->segs_submitted++;
1597         }
1598 error_seg_submit:
1599         /*
1600          * Mark this RPIPE as waiting if dto was not acquired, there are
1601          * delayed segs and no active transfers to wake us up later.
1602          */
1603         if (!dto_acquired && !list_empty(&rpipe->seg_list)
1604                 && (atomic_read(&rpipe->segs_available) ==
1605                         le16_to_cpu(rpipe->descr.wRequests)))
1606                 dto_waiting = 1;
1607         spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1608 
1609         if (dto_waiting)
1610                 wa_add_delayed_rpipe(rpipe->wa, rpipe);
1611         else if (dto_done)
1612                 wa_check_for_delayed_rpipes(rpipe->wa);
1613 
1614         return result;
1615 }
1616 
1617 
1618 /*
1619  * Second part of a URB/transfer enqueuement
1620  *
1621  * Assumes this comes from wa_urb_enqueue() and that there is no
1622  * contention on the transfer (no one else is submitting or cancelling
1623  * it while we work on it).
1624  *
1625  * Grabs an RPIPE for the endpoint and looks up the WUSB device that
1626  * owns the URB, then sets up the transfer segments and submits them.
1627  *
1628  * Failures: if anything fails before __wa_xfer_submit(), we unwind
1629  * here, store the error in xfer->result and return it so the caller
1630  * can give the URB back.  If __wa_xfer_submit() fails, some segments
1631  * may already be in flight; in that case the normal completion path
1632  * reports the error and we return 0.
1633  */
1634 
1635 
1636 
1637 
1638 
1639 static int wa_urb_enqueue_b(struct wa_xfer *xfer)
1640 {
1641         int result;
1642         unsigned long flags;
1643         struct urb *urb = xfer->urb;
1644         struct wahc *wa = xfer->wa;
1645         struct wusbhc *wusbhc = wa->wusb;
1646         struct wusb_dev *wusb_dev;
1647         unsigned done;
1648 
1649         result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
1650         if (result < 0) {
1651                 pr_err("%s: error_rpipe_get\n", __func__);
1652                 goto error_rpipe_get;
1653         }
1654         result = -ENODEV;
1655         /* hold the wusbhc mutex while looking up the WUSB device */
1656         mutex_lock(&wusbhc->mutex);             /* get a WUSB dev */
1657         if (urb->dev == NULL) {
1658                 mutex_unlock(&wusbhc->mutex);
1659                 pr_err("%s: error usb dev gone\n", __func__);
1660                 goto error_dev_gone;
1661         }
1662         wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
1663         if (wusb_dev == NULL) {
1664                 mutex_unlock(&wusbhc->mutex);
1665                 dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n",
1666                         __func__);
1667                 goto error_dev_gone;
1668         }
1669         mutex_unlock(&wusbhc->mutex);
1670 
1671         spin_lock_irqsave(&xfer->lock, flags);
1672         xfer->wusb_dev = wusb_dev;
1673         result = urb->status;
1674         if (urb->status != -EINPROGRESS) {
1675                 dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);
1676                 goto error_dequeued;
1677         }
1678 
1679         result = __wa_xfer_setup(xfer, urb);
1680         if (result < 0) {
1681                 dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
1682                 goto error_xfer_setup;
1683         }
1684         /*
1685          * Get a xfer reference since __wa_xfer_submit starts asynchronous
1686          * operations that may try to complete the xfer before this function
1687          * exits.
1688          */
1689         wa_xfer_get(xfer);
1690         result = __wa_xfer_submit(xfer);
1691         if (result < 0) {
1692                 dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
1693                 goto error_xfer_submit;
1694         }
1695         spin_unlock_irqrestore(&xfer->lock, flags);
1696         wa_xfer_put(xfer);
1697         return 0;
1698 
1699         /*
1700          * The error paths below are basically wa_xfer_completion() broken
1701          * up: undo in reverse order what was set up above, record the
1702          * error in xfer->result and let the caller give the URB back.
1703          */
1704 error_xfer_setup:
1705 error_dequeued:
1706         spin_unlock_irqrestore(&xfer->lock, flags);
1707         /* drop the WUSB dev reference taken in the lookup above */
1708         if (wusb_dev)
1709                 wusb_dev_put(wusb_dev);
1710 error_dev_gone:
1711         rpipe_put(xfer->ep->hcpriv);
1712 error_rpipe_get:
1713         xfer->result = result;
1714         return result;
1715 
1716 error_xfer_submit:
1717         done = __wa_xfer_is_done(xfer);
1718         xfer->result = result;
1719         spin_unlock_irqrestore(&xfer->lock, flags);
1720         if (done)
1721                 wa_xfer_completion(xfer);
1722         wa_xfer_put(xfer);
1723         /* return success since the completion routine will run. */
1724         return 0;
1725 }
1726 
1727 /*
1728  * Execute the delayed transfers in the Wire Adapter @wa
1729  *
1730  * We need to be careful here, as dequeue() could be called in the
1731  * middle.  That's why we do the whole thing under the
1732  * wa->xfer_list_lock.  If dequeue() jumps in, it first locks xfer->lock
1733  * and then checks the list -- so as we would be acquiring in inverse
1734  * order, we move the delayed list to a separate list while locked and
1735  * then submit them without the list lock held.
1736  */
1737 void wa_urb_enqueue_run(struct work_struct *ws)
1738 {
1739         struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
1740         struct wa_xfer *xfer, *next;
1741         struct urb *urb;
1742         LIST_HEAD(tmp_list);
1743 
1744         /* Create a copy of the wa->xfer_delayed_list while holding the lock */
1745         spin_lock_irq(&wa->xfer_list_lock);
1746         list_cut_position(&tmp_list, &wa->xfer_delayed_list,
1747                         wa->xfer_delayed_list.prev);
1748         spin_unlock_irq(&wa->xfer_list_lock);
1749 
1750         /*
1751          * enqueue from temp list without list lock held in case the
1752          * xfers are completed immediately.
1753          */
1754         list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1755                 list_del_init(&xfer->list_node);
1756 
1757                 urb = xfer->urb;
1758                 if (wa_urb_enqueue_b(xfer) < 0)
1759                         wa_xfer_giveback(xfer);
1760                 usb_put_urb(urb);       /* taken when queuing */
1761         }
1762 }
1763 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1764 
1765 /*
1766  * Process the errored transfers on the Wire Adapter outside of interrupt.
1767  */
1768 void wa_process_errored_transfers_run(struct work_struct *ws)
1769 {
1770         struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
1771         struct wa_xfer *xfer, *next;
1772         LIST_HEAD(tmp_list);
1773 
1774         pr_info("%s: Run delayed STALL processing.\n", __func__);
1775 
1776         /* Create a copy of the wa->xfer_errored_list while holding the lock */
1777         spin_lock_irq(&wa->xfer_list_lock);
1778         list_cut_position(&tmp_list, &wa->xfer_errored_list,
1779                         wa->xfer_errored_list.prev);
1780         spin_unlock_irq(&wa->xfer_list_lock);
1781 
1782         /*
1783          * run rpipe_clear_feature_stalled from temp list without list lock
1784          * held.
1785          */
1786         list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1787                 struct usb_host_endpoint *ep;
1788                 unsigned long flags;
1789                 struct wa_rpipe *rpipe;
1790 
1791                 spin_lock_irqsave(&xfer->lock, flags);
1792                 ep = xfer->ep;
1793                 rpipe = ep->hcpriv;
1794                 spin_unlock_irqrestore(&xfer->lock, flags);
1795 
1796                 /* clear RPIPE feature stalled without holding a lock. */
1797                 rpipe_clear_feature_stalled(wa, ep);
1798 
1799                 /* complete the xfer. This removes it from the tmp list. */
1800                 wa_xfer_completion(xfer);
1801 
1802                 /* check for work. */
1803                 wa_xfer_delayed_run(rpipe);
1804         }
1805 }
1806 EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
1807 
1808 /*
1809  * Submit a transfer to the Wire Adapter in a delayed way
1810  *
1811  * The process of enqueuing involves possible sleeps() [see
1812  * wa_urb_enqueue_b(), for the rpipe_get() and the mutex_lock()].  If
1813  * we are in an atomic section, we defer the wa_urb_enqueue_b() call
1814  * to a work queue -- else we call it directly now.
1815  *
1816  * @returns 0 (success) or < 0 (error).
1817  */
1818 
1819 
1820 int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1821                    struct urb *urb, gfp_t gfp)
1822 {
1823         int result;
1824         struct device *dev = &wa->usb_iface->dev;
1825         struct wa_xfer *xfer;
1826         unsigned long my_flags;
1827         unsigned cant_sleep = irqs_disabled() | in_atomic();
1828 
1829         if ((urb->transfer_buffer == NULL)
1830             && (urb->sg == NULL)
1831             && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1832             && urb->transfer_buffer_length != 0) {
1833                 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1834                 dump_stack();
1835         }
1836 
1837         spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1838         result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb);
1839         spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1840         if (result < 0)
1841                 goto error_link_urb;
1842 
1843         result = -ENOMEM;
1844         xfer = kzalloc(sizeof(*xfer), gfp);
1845         if (xfer == NULL)
1846                 goto error_kmalloc;
1847 
1848         result = -ENOENT;
1849         if (urb->status != -EINPROGRESS)        /* cancelled */
1850                 goto error_dequeued;            /* before starting? */
1851         wa_xfer_init(xfer);
1852         xfer->wa = wa_get(wa);
1853         xfer->urb = urb;
1854         xfer->gfp = gfp;
1855         xfer->ep = ep;
1856         urb->hcpriv = xfer;
1857 
1858         dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1859                 xfer, urb, urb->pipe, urb->transfer_buffer_length,
1860                 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1861                 urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1862                 cant_sleep ? "deferred" : "inline");
1863 
1864         if (cant_sleep) {
1865                 usb_get_urb(urb);
1866                 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1867                 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1868                 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1869                 queue_work(wusbd, &wa->xfer_enqueue_work);
1870         } else {
1871                 result = wa_urb_enqueue_b(xfer);
1872                 if (result < 0) {
1873                         /*
1874                          * URB submit/enqueue failed.  Clean up, return an
1875                          * error and do not run the callback.  This avoids
1876                          * an infinite submit/complete loop.
1877                          */
1878                         dev_err(dev, "%s: URB enqueue failed: %d\n",
1879                            __func__, result);
1880                         wa_put(xfer->wa);
1881                         wa_xfer_put(xfer);
1882                         spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1883                         usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
1884                         spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1885                         return result;
1886                 }
1887         }
1888         return 0;
1889 
1890 error_dequeued:
1891         kfree(xfer);
1892 error_kmalloc:
1893         spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1894         usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
1895         spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1896 error_link_urb:
1897         return result;
1898 }
1899 EXPORT_SYMBOL_GPL(wa_urb_enqueue);
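     /*
      * A minimal caller sketch (hypothetical HCD glue; my_hc and
      * usb_hcd_to_my_hc() are illustrative, not part of this file):
      *
      *         static int my_hc_urb_enqueue(struct usb_hcd *hcd,
      *                         struct urb *urb, gfp_t gfp)
      *         {
      *                 struct my_hc *hc = usb_hcd_to_my_hc(hcd);
      *
      *                 return wa_urb_enqueue(&hc->wa, urb->ep, urb, gfp);
      *         }
      */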
1900 
1901 /*
1902  * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion
1903  * handler] is called.
1904  *
1905  * Until a transfer goes successfully through, it sits in the queue as
1906  * a delayed xfer or has segments in flight on the wire.  Depending on
1907  * how far it got we can just unlink it (still delayed, no segments
1908  * allocated) or we have to send an abort request to the HWA and then
1909  * walk the segments: segments the HWA never saw are marked aborted
1910  * here, while segments the HWA knows about are left for the abort
1911  * machinery and the DTI to complete.
1912  *
1913  * The xfer reference taken under wa->xfer_list_lock keeps the xfer
1914  * from disappearing while we work on it; every exit path drops it.
1915  */
1916 
1917 
1918 
1919 int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
1920 {
1921         unsigned long flags;
1922         struct wa_xfer *xfer;
1923         struct wa_seg *seg;
1924         struct wa_rpipe *rpipe;
1925         unsigned cnt, done = 0, xfer_abort_pending;
1926         unsigned rpipe_ready = 0;
1927         int result;
1928 
1929         /* check if it is safe to unlink. */
1930         spin_lock_irqsave(&wa->xfer_list_lock, flags);
1931         result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
1932         if ((result == 0) && urb->hcpriv) {
1933                 /*
1934                  * Get a xfer ref to prevent a race with wa_xfer_giveback
1935                  * cleaning up the xfer while we are working with it.
1936                  */
1937                 wa_xfer_get(urb->hcpriv);
1938         }
1939         spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1940         if (result)
1941                 return result;
1942 
1943         xfer = urb->hcpriv;
1944         if (xfer == NULL)
1945                 return -ENOENT;
1946         spin_lock_irqsave(&xfer->lock, flags);
1947         pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
1948         rpipe = xfer->ep->hcpriv;
1949         if (rpipe == NULL) {
1950                 pr_debug("%s: xfer %p id 0x%08X has no RPIPE.  %s",
1951                         __func__, xfer, wa_xfer_id(xfer),
1952                         "Probably already aborted.\n" );
1953                 result = -ENOENT;
1954                 goto out_unlock;
1955         }
1956         /*
1957          * Check for done to avoid racing with wa_xfer_giveback and
1958          * completing twice.
1959          */
1960         if (__wa_xfer_is_done(xfer)) {
1961                 pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
1962                         xfer, wa_xfer_id(xfer));
1963                 result = -ENOENT;
1964                 goto out_unlock;
1965         }
1966         /* check the delayed list -> if there, release and complete */
1967         spin_lock(&wa->xfer_list_lock);
1968         if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1969                 goto dequeue_delayed;
1970         spin_unlock(&wa->xfer_list_lock);
1971         if (xfer->seg == NULL)          /* still hasn't started */
1972                 goto out_unlock;        /* nothing on the wire to abort */
1973         /* ask the HWA to abort; remember whether the request went out */
1974         xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
1975         /*
1976          * grab the rpipe->seg_lock here to prevent racing with
1977          * __wa_xfer_delayed_run.
1978          */
1979         spin_lock(&rpipe->seg_lock);
1980         for (cnt = 0; cnt < xfer->segs; cnt++) {
1981                 seg = xfer->seg[cnt];
1982                 pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
1983                         __func__, wa_xfer_id(xfer), cnt, seg->status);
1984                 switch (seg->status) {
1985                 case WA_SEG_NOTREADY:
1986                 case WA_SEG_READY:
1987                         printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1988                                xfer, cnt, seg->status);
1989                         WARN_ON(1);
1990                         break;
1991                 case WA_SEG_DELAYED:
1992                         /*
1993                          * Delayed segs are not on the wire yet: take
1994                          * them off the rpipe queue, mark them aborted
1995                          * and count them as done.  They will never
1996                          * generate a transfer result from the HWA.
1997                          */
1998                         seg->status = WA_SEG_ABORTED;
1999                         seg->result = -ENOENT;
2000                         list_del(&seg->list_node);
2001                         xfer->segs_done++;
2002                         break;
2003                 case WA_SEG_DONE:
2004                 case WA_SEG_ERROR:
2005                 case WA_SEG_ABORTED:
2006                         break;
2007                         /*
2008                          * The buf_in data for a segment in the
2009                          * WA_SEG_DTI_PENDING state is actively being
2010                          * read.  Let the read complete; wa_buf_in_cb
2011                          * will see the abort and finish the segment
2012                          * accounting, so do not touch it here.
2013                          */
2014 
2015                 case WA_SEG_DTI_PENDING:
2016                         break;
2017                         /*
2018                          * In the states below, the HWA device already
2019                          * knows about the transfer.  If an abort request
2020                          * was sent, let the HWA process it and wait for
2021                          * the results; otherwise the DTI state and the
2022                          * seg completed counts can get out of sync.
2023                          */
2024                 case WA_SEG_SUBMITTED:
2025                 case WA_SEG_PENDING:
2026                         /*
2027                          * Check if the abort was successfully sent; if
2028                          * so, the HWA owns the cleanup.  Otherwise mark
2029                          * the seg aborted and count it as done here.
2030                          */
2031                         if (!xfer_abort_pending) {
2032                                 seg->status = WA_SEG_ABORTED;
2033                                 rpipe_ready = rpipe_avail_inc(rpipe);
2034                                 xfer->segs_done++;
2035                         }
2036                         break;
2037                 }
2038         }
2039         spin_unlock(&rpipe->seg_lock);
2040         xfer->result = urb->status;     /* -ENOENT or -ECONNRESET */
2041         done = __wa_xfer_is_done(xfer);
2042         spin_unlock_irqrestore(&xfer->lock, flags);
2043         if (done)
2044                 wa_xfer_completion(xfer);
2045         if (rpipe_ready)
2046                 wa_xfer_delayed_run(rpipe);
2047         wa_xfer_put(xfer);
2048         return result;
2049 
2050 out_unlock:
2051         spin_unlock_irqrestore(&xfer->lock, flags);
2052         wa_xfer_put(xfer);
2053         return result;
2054 
2055 dequeue_delayed:
2056         list_del_init(&xfer->list_node);
2057         spin_unlock(&wa->xfer_list_lock);
2058         xfer->result = urb->status;
2059         spin_unlock_irqrestore(&xfer->lock, flags);
2060         wa_xfer_giveback(xfer);
2061         wa_xfer_put(xfer);
2062         usb_put_urb(urb);               /* we got a ref in enqueue() */
2063         return 0;
2064 }
2065 EXPORT_SYMBOL_GPL(wa_urb_dequeue);
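     /*
      * The matching dequeue op in the same hypothetical HCD glue
      * forwards in kind:
      *
      *         static int my_hc_urb_dequeue(struct usb_hcd *hcd,
      *                         struct urb *urb, int status)
      *         {
      *                 struct my_hc *hc = usb_hcd_to_my_hc(hcd);
      *
      *                 return wa_urb_dequeue(&hc->wa, urb, status);
      *         }
      *
      * On success the URB is given back through the completion path,
      * not synchronously here.
      */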
2066 
2067 /*
2068  * Translation from WA status codes (WUSB1.0 Table 8.15) to errno values
2069  *
2070  * Positive errno values in the table mark internal inconsistencies and
2071  * are flagged louder (and negated) below.  Negative values are passed
2072  * up to the user in the normal way.
2073  *
2074  * @status: bits D7 (error) and D6 (warning) are masked off first, as
2075  *          they qualify the code rather than identify it.
2076  */
2077 static int wa_xfer_status_to_errno(u8 status)
2078 {
2079         int errno;
2080         u8 real_status = status;
2081         static int xlat[] = {
2082                 [WA_XFER_STATUS_SUCCESS] =              0,
2083                 [WA_XFER_STATUS_HALTED] =               -EPIPE,
2084                 [WA_XFER_STATUS_DATA_BUFFER_ERROR] =    -ENOBUFS,
2085                 [WA_XFER_STATUS_BABBLE] =               -EOVERFLOW,
2086                 [WA_XFER_RESERVED] =                    EINVAL,
2087                 [WA_XFER_STATUS_NOT_FOUND] =            0,
2088                 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
2089                 [WA_XFER_STATUS_TRANSACTION_ERROR] =    -EILSEQ,
2090                 [WA_XFER_STATUS_ABORTED] =              -ENOENT,
2091                 [WA_XFER_STATUS_RPIPE_NOT_READY] =      EINVAL,
2092                 [WA_XFER_INVALID_FORMAT] =              EINVAL,
2093                 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =   EINVAL,
2094                 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =  EINVAL,
2095         };
2096         status &= 0x3f;
2097 
2098         if (status == 0)
2099                 return 0;
2100         if (status >= ARRAY_SIZE(xlat)) {
2101                 printk_ratelimited(KERN_ERR "%s(): BUG? "
2102                                "Unknown WA transfer status 0x%02x\n",
2103                                __func__, real_status);
2104                 return -EINVAL;
2105         }
2106         errno = xlat[status];
2107         if (unlikely(errno > 0)) {
2108                 printk_ratelimited(KERN_ERR "%s(): BUG? "
2109                                "Inconsistent WA status: 0x%02x\n",
2110                                __func__, real_status);
2111                 errno = -errno;
2112         }
2113         return errno;
2114 }
2115 
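     /*
      * Worked example (assuming the WUSB1.0 Table 8.15 encoding, where
      * WA_XFER_STATUS_HALTED is code 1): a bTransferStatus of 0x81 --
      * error bit D7 set on a HALTED code -- masks to 0x01 here and maps
      * to -EPIPE.  A warning-qualified status such as 0x41 never reaches
      * this table with its qualifier set: wa_xfer_result_chew() clears
      * warning results to 0 (success) instead.
      */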
2116 /*
2117  * If a last segment flag and/or an error was encountered, no more
2118  * transfer results will arrive from the device for this xfer.  Mark
2119  * the remaining submitted, pending and delayed segments (starting at
2120  * starting_index) with the given status so the transfer can complete.
2121  */
2122 
2123 
2124 
2125 static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
2126                 int starting_index, enum wa_seg_status status)
2127 {
2128         int index;
2129         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
2130 
2131         for (index = starting_index; index < xfer->segs_submitted; index++) {
2132                 struct wa_seg *current_seg = xfer->seg[index];
2133 
2134                 BUG_ON(current_seg == NULL);
2135 
2136                 switch (current_seg->status) {
2137                 case WA_SEG_SUBMITTED:
2138                 case WA_SEG_PENDING:
2139                 case WA_SEG_DTI_PENDING:
2140                         rpipe_avail_inc(rpipe);
2141                 /*
2142                  * do not increment RPIPE avail for the WA_SEG_DELAYED case
2143                  * because it has not been submitted to the RPIPE.
2144                  */
2145                 /* fall through */
2146                 case WA_SEG_DELAYED:
2147                         xfer->segs_done++;
2148                         current_seg->status = status;
2149                         break;
2150                 case WA_SEG_ABORTED:
2151                         break;
2152                 default:
2153                         WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
2154                                 __func__, wa_xfer_id(xfer), index,
2155                                 current_seg->status);
2156                         break;
2157                 }
2158         }
2159 }
2160 
2161 /* Populate the given urb based on the current isoc transfer state. */
2162 static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
2163         struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
2164 {
2165         int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
2166         int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
2167         struct usb_iso_packet_descriptor *iso_frame_desc =
2168                                                 xfer->urb->iso_frame_desc;
2169         const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
2170         int next_frame_contiguous;
2171         struct usb_iso_packet_descriptor *iso_frame;
2172 
2173         BUG_ON(buf_in_urb->status == -EINPROGRESS);
2174 
2175         /*
2176          * If the current frame actual_length is contiguous with the next
2177          * frame and actual_length is a multiple of the DTI endpoint max
2178          * packet size, combine the current frame with the next frame in a
2179          * single URB.  This will reduce the number of URBs we must submit.
2180          */
2181         seg_index = seg->isoc_frame_index;
2182         do {
2183                 next_frame_contiguous = 0;
2184 
2185                 iso_frame = &iso_frame_desc[urb_frame_index];
2186                 total_len += iso_frame->actual_length;
2187                 ++urb_frame_index;
2188                 ++seg_index;
2189 
2190                 if (seg_index < seg->isoc_frame_count) {
2191                         struct usb_iso_packet_descriptor *next_iso_frame;
2192 
2193                         next_iso_frame = &iso_frame_desc[urb_frame_index];
2194 
2195                         if ((iso_frame->offset + iso_frame->actual_length) ==
2196                                 next_iso_frame->offset)
2197                                 next_frame_contiguous = 1;
2198                 }
2199         } while (next_frame_contiguous
2200                         && ((iso_frame->actual_length % dti_packet_size) == 0));
2201 
2202         /* this should always be 0 before a resubmit. */
2203         buf_in_urb->num_mapped_sgs      = 0;
2204         buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
2205                 iso_frame_desc[urb_start_frame].offset;
2206         buf_in_urb->transfer_buffer_length = total_len;
2207         buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2208         buf_in_urb->transfer_buffer = NULL;
2209         buf_in_urb->sg = NULL;
2210         buf_in_urb->num_sgs = 0;
2211         buf_in_urb->context = seg;
2212 
2213         /* return the number of frames included in this URB. */
2214         return seg_index - seg->isoc_frame_index;
2215 }
2216 
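     /*
      * Worked example of the coalescing rule above (assuming a 512-byte
      * DTI endpoint max packet size): three contiguous frames of 512,
      * 512 and 100 bytes are read with a single 1124-byte URB; the
      * 100-byte frame ends the run because it is not a multiple of the
      * DTI packet size, and the function reports 3 frames consumed.
      */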
2217 /* Populate the given urb based on the current transfer state. */
2218 static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
2219         unsigned int seg_idx, unsigned int bytes_transferred)
2220 {
2221         int result = 0;
2222         struct wa_seg *seg = xfer->seg[seg_idx];
2223 
2224         BUG_ON(buf_in_urb->status == -EINPROGRESS);
2225         /* this should always be 0 before a resubmit. */
2226         buf_in_urb->num_mapped_sgs      = 0;
2227 
2228         if (xfer->is_dma) {
2229                 buf_in_urb->transfer_dma = xfer->urb->transfer_dma
2230                         + (seg_idx * xfer->seg_size);
2231                 buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2232                 buf_in_urb->transfer_buffer = NULL;
2233                 buf_in_urb->sg = NULL;
2234                 buf_in_urb->num_sgs = 0;
2235         } else {
2236                 /* do buffer or SG processing. */
2237                 buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
2238 
2239                 if (xfer->urb->transfer_buffer) {
2240                         buf_in_urb->transfer_buffer =
2241                                 xfer->urb->transfer_buffer
2242                                 + (seg_idx * xfer->seg_size);
2243                         buf_in_urb->sg = NULL;
2244                         buf_in_urb->num_sgs = 0;
2245                 } else {
2246                         /*
2247                          * allocate an SG list covering the subset of
2248                          * xfer->urb->sg we are about to read.
2249                          */
2250                         buf_in_urb->sg = wa_xfer_create_subset_sg(
2251                                 xfer->urb->sg,
2252                                 seg_idx * xfer->seg_size,
2253                                 bytes_transferred,
2254                                 &(buf_in_urb->num_sgs));
2255 
2256                         if (!(buf_in_urb->sg)) {
2257                                 buf_in_urb->num_sgs     = 0;
2258                                 result = -ENOMEM;
2259                         }
2260                         buf_in_urb->transfer_buffer = NULL;
2261                 }
2262         }
2263         buf_in_urb->transfer_buffer_length = bytes_transferred;
2264         buf_in_urb->context = seg;
2265 
2266         return result;
2267 }
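     /*
      * The three branches above mirror how the original URB carried its
      * buffer: a mapped DMA address (is_dma), a linear transfer_buffer,
      * or a scatterlist; the buf_in URB reads this segment's slice of
      * whichever one was used.
      */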
2268 
2269 /*
2270  * Process a xfer result completion message
2271  *
2272  * inbound transfers: need to schedule a buf_in_urb read
2273  *
2274  * FIXME: this function needs to be broken up in parts
2275  */
2276 static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
2277                 struct wa_xfer_result *xfer_result)
2278 {
2279         int result;
2280         struct device *dev = &wa->usb_iface->dev;
2281         unsigned long flags;
2282         unsigned int seg_idx;
2283         struct wa_seg *seg;
2284         struct wa_rpipe *rpipe;
2285         unsigned done = 0;
2286         u8 usb_status;
2287         unsigned rpipe_ready = 0;
2288         unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
2289         struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);
2290 
2291         spin_lock_irqsave(&xfer->lock, flags);
2292         seg_idx = xfer_result->bTransferSegment & 0x7f;
2293         if (unlikely(seg_idx >= xfer->segs))
2294                 goto error_bad_seg;
2295         seg = xfer->seg[seg_idx];
2296         rpipe = xfer->ep->hcpriv;
2297         usb_status = xfer_result->bTransferStatus;
2298         dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
2299                 xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
2300         if (seg->status == WA_SEG_ABORTED
2301             || seg->status == WA_SEG_ERROR)     
2302                 goto segment_aborted;
2303         if (seg->status == WA_SEG_SUBMITTED)    
2304                 seg->status = WA_SEG_PENDING;   
2305         if (seg->status != WA_SEG_PENDING) {
2306                 if (printk_ratelimit())
2307                         dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
2308                                 xfer, seg_idx, seg->status);
2309                 seg->status = WA_SEG_PENDING;   
2310         }
2311         if (usb_status & 0x80) {
2312                 seg->result = wa_xfer_status_to_errno(usb_status);
2313                 dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
2314                         xfer, xfer->id, seg->index, usb_status);
2315                 seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
2316                         WA_SEG_ABORTED : WA_SEG_ERROR;
2317                 goto error_complete;
2318         }
2319         /* FIXME: we ignore warnings, tally them for stats */
2320         if (usb_status & 0x40)          /* Warning?... */
2321                 usb_status = 0;         /* ... pass */
2322         /*
2323          * If the last segment bit is set, complete the remaining segments.
2324          * When the current segment is completed, either in wa_buf_in_cb for
2325          * data reads or below for no data, the xfer will be given back.
2326          */
2327         if (xfer_result->bTransferSegment & 0x80)
2328                 wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
2329                         WA_SEG_DONE);
2330         if (usb_pipeisoc(xfer->urb->pipe)
2331                 && (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
2332                 /* set up WA state to read the isoc packet status next. */
2333                 wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
2334                 wa->dti_isoc_xfer_seg = seg_idx;
2335                 wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
2336         } else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
2337                         && (bytes_transferred > 0)) {
2338                 /* IN data phase: read to buffer */
2339                 seg->status = WA_SEG_DTI_PENDING;
2340                 result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
2341                         bytes_transferred);
2342                 if (result < 0)
2343                         goto error_buf_in_populate;
2344                 ++(wa->active_buf_in_urbs);
2345                 result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
2346                 if (result < 0) {
2347                         --(wa->active_buf_in_urbs);
2348                         goto error_submit_buf_in;
2349                 }
2350         } else {
2351                 /* OUT data phase or no data, complete it -- */
2352                 seg->result = bytes_transferred;
2353                 rpipe_ready = rpipe_avail_inc(rpipe);
2354                 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
2355         }
2356         spin_unlock_irqrestore(&xfer->lock, flags);
2357         if (done)
2358                 wa_xfer_completion(xfer);
2359         if (rpipe_ready)
2360                 wa_xfer_delayed_run(rpipe);
2361         return;
2362 
2363 error_submit_buf_in:
2364         if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2365                 dev_err(dev, "DTI: URB max acceptable errors "
2366                         "exceeded, resetting device\n");
2367                 wa_reset_all(wa);
2368         }
2369         if (printk_ratelimit())
2370                 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
2371                         xfer, seg_idx, result);
2372         seg->result = result;
2373         kfree(buf_in_urb->sg);
2374         buf_in_urb->sg = NULL;
2375 error_buf_in_populate:
2376         __wa_xfer_abort(xfer);
2377         seg->status = WA_SEG_ERROR;
2378 error_complete:
2379         xfer->segs_done++;
2380         rpipe_ready = rpipe_avail_inc(rpipe);
2381         wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
2382         done = __wa_xfer_is_done(xfer);
2383         /*
2384          * queue work item to clear STALL for control endpoints.
2385          * Otherwise, let endpoint_reset take care of it.
2386          */
2387         if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
2388                 usb_endpoint_xfer_control(&xfer->ep->desc) &&
2389                 done) {
2390 
2391                 dev_info(dev, "Control EP stall.  Queue delayed work.\n");
2392                 spin_lock(&wa->xfer_list_lock);
2393                 /* move xfer from xfer_list to xfer_errored_list. */
2394                 list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
2395                 spin_unlock(&wa->xfer_list_lock);
2396                 spin_unlock_irqrestore(&xfer->lock, flags);
2397                 queue_work(wusbd, &wa->xfer_error_work);
2398         } else {
2399                 spin_unlock_irqrestore(&xfer->lock, flags);
2400                 if (done)
2401                         wa_xfer_completion(xfer);
2402                 if (rpipe_ready)
2403                         wa_xfer_delayed_run(rpipe);
2404         }
2405 
2406         return;
2407 
2408 error_bad_seg:
2409         spin_unlock_irqrestore(&xfer->lock, flags);
2410         wa_urb_dequeue(wa, xfer->urb, -ENOENT);
2411         if (printk_ratelimit())
2412                 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
2413         if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2414                 dev_err(dev, "DTI: URB max acceptable errors "
2415                         "exceeded, resetting device\n");
2416                 wa_reset_all(wa);
2417         }
2418         return;
2419 
2420 segment_aborted:
2421         /* nothing to do, as the aborter did the completion */
2422         spin_unlock_irqrestore(&xfer->lock, flags);
2423 }
2424 
2425 /*
2426  * Process a isochronous packet status message
2427  *
2428  * inbound transfers: need to schedule a buf_in_urb read
2429  */
2430 static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
2431 {
2432         struct device *dev = &wa->usb_iface->dev;
2433         struct wa_xfer_packet_status_hwaiso *packet_status;
2434         struct wa_xfer_packet_status_len_hwaiso *status_array;
2435         struct wa_xfer *xfer;
2436         unsigned long flags;
2437         struct wa_seg *seg;
2438         struct wa_rpipe *rpipe;
2439         unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
2440         unsigned first_frame_index = 0, rpipe_ready = 0;
2441         size_t expected_size;
2442 
2443         /* We have a xfer result buffer; check it */
2444         dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
2445                 urb->actual_length, urb->transfer_buffer);
2446         packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
2447         if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
2448                 dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
2449                         packet_status->bPacketType);
2450                 goto error_parse_buffer;
2451         }
2452         xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
2453         if (xfer == NULL) {
2454                 dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
2455                         wa->dti_isoc_xfer_in_progress);
2456                 goto error_parse_buffer;
2457         }
2458         spin_lock_irqsave(&xfer->lock, flags);
2459         if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
2460                 goto error_bad_seg;
2461         seg = xfer->seg[wa->dti_isoc_xfer_seg];
2462         rpipe = xfer->ep->hcpriv;
2463         expected_size = struct_size(packet_status, PacketStatus,
2464                                     seg->isoc_frame_count);
2465         if (urb->actual_length != expected_size) {
2466                 dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %zu needed)\n",
2467                         urb->actual_length, expected_size);
2468                 goto error_bad_seg;
2469         }
2470         if (le16_to_cpu(packet_status->wLength) != expected_size) {
2471                 dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
2472                         le16_to_cpu(packet_status->wLength));
2473                 goto error_bad_seg;
2474         }
2475         /* write isoc packet status and lengths back to the urb */
2476         status_array = packet_status->PacketStatus;
2477         xfer->urb->start_frame =
2478                 wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
2479         for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
2480                 struct usb_iso_packet_descriptor *iso_frame_desc =
2481                         xfer->urb->iso_frame_desc;
2482                 const int xfer_frame_index =
2483                         seg->isoc_frame_offset + seg_index;
2484 
2485                 iso_frame_desc[xfer_frame_index].status =
2486                         wa_xfer_status_to_errno(
2487                         le16_to_cpu(status_array[seg_index].PacketStatus));
2488                 iso_frame_desc[xfer_frame_index].actual_length =
2489                         le16_to_cpu(status_array[seg_index].PacketLength);
2490                 /* track the number of frames successfully transferred */
2491                 if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
2492                         /* save the starting frame index for buf_in_urb */
2493                         if (!data_frame_count)
2494                                 first_frame_index = seg_index;
2495                         ++data_frame_count;
2496                 }
2497         }
2498 
2499         if (xfer->is_inbound && data_frame_count) {
2500                 int result, total_frames_read = 0, urb_index = 0;
2501                 struct urb *buf_in_urb;
2502 
2503                 /* IN data phase: read to buffer */
2504                 seg->status = WA_SEG_DTI_PENDING;
2505 
2506                 /* start with the first frame with data. */
2507                 seg->isoc_frame_index = first_frame_index;
2508                 /* submit up to WA_MAX_BUF_IN_URBS read URBs. */
2509                 do {
2510                         int urb_frame_index, urb_frame_count;
2511                         struct usb_iso_packet_descriptor *iso_frame_desc;
2512 
2513                         buf_in_urb = &(wa->buf_in_urbs[urb_index]);
2514                         urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
2515                                 buf_in_urb, xfer, seg);
2516                         /* advance frame index to start of next read URB. */
2517                         seg->isoc_frame_index += urb_frame_count;
2518                         total_frames_read += urb_frame_count;
2519 
2520                         ++(wa->active_buf_in_urbs);
2521                         result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
2522 
2523                         /* skip 0-byte frames. */
2524                         urb_frame_index =
2525                                 seg->isoc_frame_offset + seg->isoc_frame_index;
2526                         iso_frame_desc =
2527                                 &(xfer->urb->iso_frame_desc[urb_frame_index]);
2528                         while ((seg->isoc_frame_index <
2529                                                 seg->isoc_frame_count) &&
2530                                  (iso_frame_desc->actual_length == 0)) {
2531                                 ++(seg->isoc_frame_index);
2532                                 ++iso_frame_desc;
2533                         }
2534                         ++urb_index;
2535 
2536                 } while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
2537                                 && (seg->isoc_frame_index <
2538                                                 seg->isoc_frame_count));
2539 
2540                 if (result < 0) {
2541                         --(wa->active_buf_in_urbs);
2542                         dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
2543                                 result);
2544                         wa_reset_all(wa);
2545                 } else if (data_frame_count > total_frames_read)
2546                         /* more frames need to be read; keep the DTI busy. */
2547                         dti_busy = 1;
2548         } else {
2549                 /* OUT transfer or no more IN data, complete it -- */
2550                 rpipe_ready = rpipe_avail_inc(rpipe);
2551                 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
2552         }
2553         spin_unlock_irqrestore(&xfer->lock, flags);
2554         if (dti_busy)
2555                 wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
2556         else
2557                 wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
2558         if (done)
2559                 wa_xfer_completion(xfer);
2560         if (rpipe_ready)
2561                 wa_xfer_delayed_run(rpipe);
2562         wa_xfer_put(xfer);
2563         return dti_busy;
2564 
2565 error_bad_seg:
2566         spin_unlock_irqrestore(&xfer->lock, flags);
2567         wa_xfer_put(xfer);
2568 error_parse_buffer:
2569         return dti_busy;
2570 }
2571 
2572 /*
2573  * Callback for the IN data phase
2574  *
2575  * If successful transition state; otherwise, take a note of the
2576  * error, mark this segment done and try completion.
2577  *
2578  * Note we don't access until we are sure that the transfer hasn't
2579  * been cancelled (ECONNRESET, ENOENT), which could mean that
2580  * seg->xfer could be already gone.
2581  */
2582 static void wa_buf_in_cb(struct urb *urb)
2583 {
2584         struct wa_seg *seg = urb->context;
2585         struct wa_xfer *xfer = seg->xfer;
2586         struct wahc *wa;
2587         struct device *dev;
2588         struct wa_rpipe *rpipe;
2589         unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
2590         unsigned long flags;
2591         int resubmit_dti = 0, active_buf_in_urbs;
2592         u8 done = 0;
2593 
2594         /* free the sg if it was used. */
2595         kfree(urb->sg);
2596         urb->sg = NULL;
2597 
2598         spin_lock_irqsave(&xfer->lock, flags);
2599         wa = xfer->wa;
2600         dev = &wa->usb_iface->dev;
2601         --(wa->active_buf_in_urbs);
2602         active_buf_in_urbs = wa->active_buf_in_urbs;
2603         rpipe = xfer->ep->hcpriv;
2604 
2605         if (usb_pipeisoc(xfer->urb->pipe)) {
2606                 struct usb_iso_packet_descriptor *iso_frame_desc =
2607                         xfer->urb->iso_frame_desc;
2608                 int     seg_index;
2609 
2610                 /*
2611                  * Find the next isoc frame with data and count how many
2612                  * frames with data remain.
2613                  */
2614                 seg_index = seg->isoc_frame_index;
2615                 while (seg_index < seg->isoc_frame_count) {
2616                         const int urb_frame_index =
2617                                 seg->isoc_frame_offset + seg_index;
2618 
2619                         if (iso_frame_desc[urb_frame_index].actual_length > 0) {
2620                                 /* save the index of the next frame with data */
2621                                 if (!isoc_data_frame_count)
2622                                         seg->isoc_frame_index = seg_index;
2623                                 ++isoc_data_frame_count;
2624                         }
2625                         ++seg_index;
2626                 }
2627         }
2628         spin_unlock_irqrestore(&xfer->lock, flags);
2629 
2630         switch (urb->status) {
2631         case 0:
2632                 spin_lock_irqsave(&xfer->lock, flags);
2633 
2634                 seg->result += urb->actual_length;
2635                 if (isoc_data_frame_count > 0) {
2636                         int result, urb_frame_count;
2637 
2638                         /* submit a read URB for the next frame with data. */
2639                         urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
2640                                  xfer, seg);
2641                         /* advance index to start of next read URB. */
2642                         seg->isoc_frame_index += urb_frame_count;
2643                         ++(wa->active_buf_in_urbs);
2644                         result = usb_submit_urb(urb, GFP_ATOMIC);
2645                         if (result < 0) {
2646                                 --(wa->active_buf_in_urbs);
2647                                 dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
2648                                         result);
2649                                 wa_reset_all(wa);
2650                         }
2651                         /*
2652                          * If we are in this callback and
2653                          * isoc_data_frame_count > 0, it means that the
2654                          * dti_urb submission was delayed in wa_dti_cb.
2655                          * Once we submit the last buf_in_urb, we can
2656                          * submit the delayed dti_urb.
2657                          */
2658                         resubmit_dti = (isoc_data_frame_count ==
2659                                                         urb_frame_count);
2660                 } else if (active_buf_in_urbs == 0) {
2661                         dev_dbg(dev,
2662                                 "xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
2663                                 xfer, wa_xfer_id(xfer), seg->index,
2664                                 seg->result);
2665                         rpipe_ready = rpipe_avail_inc(rpipe);
2666                         done = __wa_xfer_mark_seg_as_done(xfer, seg,
2667                                         WA_SEG_DONE);
2668                 }
2669                 spin_unlock_irqrestore(&xfer->lock, flags);
2670                 if (done)
2671                         wa_xfer_completion(xfer);
2672                 if (rpipe_ready)
2673                         wa_xfer_delayed_run(rpipe);
2674                 break;
2675         case -ECONNRESET:       /* URB was unlinked */
2676         case -ENOENT:           /* or killed; the canceller completes it */
2677                 break;
2678         default:                /* Other errors */
2679                 /*
2680                  * Error on data buf read.  Only resubmit the DTI if it
2681                  * hasn't already been done by a prior error here or by a
2682                  * successful completion of the previous buf_in_urb.
2683                  */
2684                 resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
2685                 spin_lock_irqsave(&xfer->lock, flags);
2686                 if (printk_ratelimit())
2687                         dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
2688                                 xfer, wa_xfer_id(xfer), seg->index,
2689                                 urb->status);
2690                 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
2691                             EDC_ERROR_TIMEFRAME)) {
2692                         dev_err(dev, "DTO: URB max acceptable errors "
2693                                 "exceeded, resetting device\n");
2694                         wa_reset_all(wa);
2695                 }
2696                 seg->result = urb->status;
2697                 rpipe_ready = rpipe_avail_inc(rpipe);
2698                 if (active_buf_in_urbs == 0)
2699                         done = __wa_xfer_mark_seg_as_done(xfer, seg,
2700                                 WA_SEG_ERROR);
2701                 else
2702                         __wa_xfer_abort(xfer);
2703                 spin_unlock_irqrestore(&xfer->lock, flags);
2704                 if (done)
2705                         wa_xfer_completion(xfer);
2706                 if (rpipe_ready)
2707                         wa_xfer_delayed_run(rpipe);
2708         }
2709 
2710         if (resubmit_dti) {
2711                 int result;
2712 
2713                 wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
2714 
2715                 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
2716                 if (result < 0) {
2717                         dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
2718                                 result);
2719                         wa_reset_all(wa);
2720                 }
2721         }
2722 }
2723 
2724 /*
2725  * Handle an incoming transfer result buffer
2726  *
2727  * Given a transfer result buffer, it completes the transfer (possibly
2728  * scheduling a buffer-in read) and then resubmits the DTI URB for a
2729  * new transfer result read.
2730  *
2731  * The xfer_result DTI URB state machine
2732  *
2733  * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
2734  *
2735  * We start in OFF mode, the first xfer result notification [through
2736  * wa_handle_notif_xfer()] moves us to RXR by posting the DTI URB to
2737  * read.
2738  *
2739  * We receive a buffer -- if it is not a xfer_result, we complain and
2740  * repost the DTI URB.  If it is a xfer_result, we do the xfer seg
2741  * and result processing [wa_xfer_result_chew()], which may schedule a
2742  * buffer-in read (RBI) for inbound data.
2743  *
2744  * If a buffer-in read was scheduled, the DTI URB is not reposted until
2745  * the last buf_in_urb completes [wa_buf_in_cb() reposts it]; otherwise
2746  * the DTI URB is reposted right away to wait for the next result.
2747  */
2748 
2749 
2750 static void wa_dti_cb(struct urb *urb)
2751 {
2752         int result, dti_busy = 0;
2753         struct wahc *wa = urb->context;
2754         struct device *dev = &wa->usb_iface->dev;
2755         u32 xfer_id;
2756         u8 usb_status;
2757 
2758         BUG_ON(wa->dti_urb != urb);
2759         switch (wa->dti_urb->status) {
2760         case 0:
2761                 if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
2762                         struct wa_xfer_result *xfer_result;
2763                         struct wa_xfer *xfer;
2764 
2765                         /* We have a xfer result buffer; check it */
2766                         dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
2767                                 urb->actual_length, urb->transfer_buffer);
2768                         if (urb->actual_length != sizeof(*xfer_result)) {
2769                                 dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
2770                                         urb->actual_length,
2771                                         sizeof(*xfer_result));
2772                                 break;
2773                         }
2774                         xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
2775                         if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
2776                                 dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
2777                                         xfer_result->hdr.bLength);
2778                                 break;
2779                         }
2780                         if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
2781                                 dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
2782                                         xfer_result->hdr.bNotifyType);
2783                                 break;
2784                         }
2785                         xfer_id = le32_to_cpu(xfer_result->dwTransferID);
2786                         usb_status = xfer_result->bTransferStatus & 0x3f;
2787                         if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
2788                                 /* taken care of already */
2789                                 dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
2790                                         __func__, xfer_id,
2791                                         xfer_result->bTransferSegment & 0x7f);
2792                                 break;
2793                         }
2794                         xfer = wa_xfer_get_by_id(wa, xfer_id);
2795                         if (xfer == NULL) {
2796                                 /* xfer may already be cancelled and freed */
2797                                 dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
2798                                         xfer_id, usb_status);
2799                                 break;
2800                         }
2801                         wa_xfer_result_chew(wa, xfer, xfer_result);
2802                         wa_xfer_put(xfer);
2803                 } else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
2804                         dti_busy = wa_process_iso_packet_status(wa, urb);
2805                 } else {
2806                         dev_err(dev, "DTI Error: unexpected EP state = %d\n",
2807                                 wa->dti_state);
2808                 }
2809                 break;
2810         case -ENOENT:           /* going away */
2811         case -ESHUTDOWN:        /* going away */
2812                 dev_dbg(dev, "DTI: going down! %d\n", urb->status);
2813                 goto out;
2814         default:
2815                 /* anything else: tally the error; reset if over budget */
2816                 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
2817                             EDC_ERROR_TIMEFRAME)) {
2818                         dev_err(dev, "DTI: URB max acceptable errors "
2819                                 "exceeded, resetting device\n");
2820                         wa_reset_all(wa);
2821                         goto out;
2822                 }
2823                 if (printk_ratelimit())
2824                         dev_err(dev, "DTI: URB error %d\n", urb->status);
2825                 break;
2826         }
2827 
2828         /* Resubmit the DTI URB */
2829         if (!dti_busy) {
2830                 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
2831                 if (result < 0) {
2832                         dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
2833                                 result);
2834                         wa_reset_all(wa);
2835                 }
2836         }
2837 out:
2838         return;
2839 }
2840 
2841 /*
2842  * Initialize the DTI URB for reading transfer result notifications and
2843  * the buf_in URBs for reading data.  Then submit the DTI URB.
2844  */
2845 int wa_dti_start(struct wahc *wa)
2846 {
2847         const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
2848         struct device *dev = &wa->usb_iface->dev;
2849         int result = -ENOMEM, index;
2850 
2851         if (wa->dti_urb != NULL)        /* DTI URB already started */
2852                 goto out;
2853 
2854         wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
2855         if (wa->dti_urb == NULL)
2856                 goto error_dti_urb_alloc;
2857         usb_fill_bulk_urb(
2858                 wa->dti_urb, wa->usb_dev,
2859                 usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
2860                 wa->dti_buf, wa->dti_buf_size,
2861                 wa_dti_cb, wa);
2862 
2863         /* init the buf in URBs */
2864         for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
2865                 usb_fill_bulk_urb(
2866                         &(wa->buf_in_urbs[index]), wa->usb_dev,
2867                         usb_rcvbulkpipe(wa->usb_dev,
2868                                 0x80 | dti_epd->bEndpointAddress),
2869                         NULL, 0, wa_buf_in_cb, wa);
2870         }
2871         result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
2872         if (result < 0) {
2873                 dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
2874                         result);
2875                 goto error_dti_urb_submit;
2876         }
2877 out:
2878         return 0;
2879 
2880 error_dti_urb_submit:
2881         usb_put_urb(wa->dti_urb);
2882         wa->dti_urb = NULL;
2883 error_dti_urb_alloc:
2884         return result;
2885 }
2886 EXPORT_SYMBOL_GPL(wa_dti_start);
2887 
2888 /*
2889  * Transfer complete notification
2890  *
2891  * Called from the notif.c code.  We get a notification on EP2 saying
2892  * that some endpoint has some transfer result data available.  We are
2893  * about to read it.
2894  *
2895  * To speed up things, we always have a URB reading the DTI URB; we
2896  * don't really set it up and start it until the first xfer complete
2897  * notification arrives, which is what we do here.
2898  *
2899  * Follow up in wa_dti_cb(), as that's where the whole state machine
2900  * starts.
2901  */
2902 
2903 void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
2904 {
2905         struct device *dev = &wa->usb_iface->dev;
2906         struct wa_notif_xfer *notif_xfer;
2907         const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
2908 
2909         notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
2910         BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
2911 
2912         if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
2913                 /* FIXME: hardcoded limitation, adapt */
2914                 dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
2915                         notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
2916                 goto error;
2917         }
2918 
2919         /* attempt to start the DTI ep processing. */
2920         if (wa_dti_start(wa) < 0)
2921                 goto error;
2922 
2923         return;
2924 
2925 error:
2926         wa_reset_all(wa);
2927 }