1 /*
2  * WUSB Wire Adapter
3  * Data transfer and URB enqueueing
4  *
5  * Copyright (C) 2005-2006 Intel Corporation
6  * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License version
10  * 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20  * 02110-1301, USA.
21  *
22  *
23  * How transfers work: get a buffer, break it up into segments
24  * (segment size is a multiple of the maxpacket size). For each segment,
25  * issue a segment request (struct wa_xfer_*), then send the data buffer
26  * if outbound, or nothing if inbound (all over the DTO endpoint).
27  *
28  * For each submitted segment request, a notification will come over
29  * the NEP endpoint and a transfer result (struct xfer_result) will
30  * arrive in the DTI URB. Read it, get the xfer ID, see if there is
31  * data coming (inbound transfer), schedule a read and handle it.
32  *
33  * Sounds simple; it is a pain to implement (see the sketch after this comment).
34  *
35  *
36  * ENTRY POINTS
37  *
38  *   FIXME
39  *
40  * LIFE CYCLE / STATE DIAGRAM
41  *
42  *   FIXME
43  *
44  * THIS CODE IS DISGUSTING
45  *
46  *   Warned you are; it's my second try and I'm still not happy with it.
47  *
48  * NOTES:
49  *
50  *   - Isoc is now supported (see the *_isoc helpers and WA_XFER_TYPE_ISO)
51  *
52  *   - Supports DMA xfers, control, bulk and maybe interrupt
53  *
54  *   - Does not recycle unused rpipes
55  *
56  *     An rpipe is assigned to an endpoint the first time it is used,
57  *     and it stays assigned until the endpoint is disabled
58  *     (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
59  *     rpipe to the endpoint is done under the wa->rpipe_sem semaphore
60  *     (should be a mutex).
61  *
62  *     Two ways it could be done:
63  *
64  *     (a) set up a timer every time an rpipe's use count drops to 1
65  *         (which means unused) or when a transfer ends. Reset the
66  *         timer when a xfer is queued. If the timer expires, release
67  *         the rpipe [see rpipe_ep_disable()].
68  *
69  *     (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
70  *         if none are found, go over the list, check each rpipe's
71  *         endpoint and activity record, and take it if there has been
72  *         no last-xfer-done-ts in the last x seconds.
73  *
74  *     However, because we have a set of limited resources
75  *     (max-segments-at-the-same-time per xfer, xfers-per-rpipe,
76  *     blocks-per-rpipe, rpipes-per-host), in the end we are going to
77  *     have to rebuild all this around a scheduler, where we keep a
78  *     list of transactions to do and, based on the availability of
79  *     the different required components (blocks, rpipes, segment
80  *     slots, etc.), we schedule them. Painful.
81  */
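/*
 * Illustrative sketch (not part of the driver): how one bulk OUT URB
 * gets broken up, assuming made-up numbers of seg_size = 3584 bytes
 * and a 14000 byte transfer buffer:
 *
 *	segs = DIV_ROUND_UP(14000, 3584) = 4
 *
 *	seg[0]: wa_xfer_* request over DTO, then data bytes     0.. 3583
 *	seg[1]: wa_xfer_* request over DTO, then data bytes  3584.. 7167
 *	seg[2]: wa_xfer_* request over DTO, then data bytes  7168..10751
 *	seg[3]: wa_xfer_* request over DTO, then data bytes 10752..13999
 *
 * For an inbound transfer only the requests go out; each segment's data
 * arrives later through the DTI endpoint, after its transfer result has
 * been read.
 */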
82 #include <linux/spinlock.h>
83 #include <linux/slab.h>
84 #include <linux/hash.h>
85 #include <linux/ratelimit.h>
86 #include <linux/export.h>
87 #include <linux/scatterlist.h>
88 
89 #include "wa-hc.h"
90 #include "wusbhc.h"
91 
92 enum {
93 	/* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
94 	WA_SEGS_MAX = 128,
95 };
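/*
 * The segment number travels in the low seven bits of bTransferSegment;
 * the top bit (0x80) flags the last segment of a transfer (see the end
 * of __wa_xfer_setup()), hence the 128 segment limit above.
 */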
96 
97 enum wa_seg_status {
98 	WA_SEG_NOTREADY,
99 	WA_SEG_READY,
100 	WA_SEG_DELAYED,
101 	WA_SEG_SUBMITTED,
102 	WA_SEG_PENDING,
103 	WA_SEG_DTI_PENDING,
104 	WA_SEG_DONE,
105 	WA_SEG_ERROR,
106 	WA_SEG_ABORTED,
107 };
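/*
 * The numeric ordering of these states matters: the completion
 * callbacks only advance a segment with checks such as
 * "if (seg->status < WA_SEG_PENDING)" (see wa_seg_tr_cb() and
 * wa_seg_dto_cb()), so any new state has to respect this progression.
 */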
108 
109 static void wa_xfer_delayed_run(struct wa_rpipe *);
110 static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);
111 
112 /*
113  * Life cycle governed by 'struct urb' (the refcount of the struct is
114  * that of the 'struct urb' and usb_free_urb() would free the whole
115  * struct).
116  */
117 struct wa_seg {
118 	struct urb tr_urb;		/* transfer request urb. */
119 	struct urb *isoc_pack_desc_urb;	/* for isoc packet descriptor. */
120 	struct urb *dto_urb;		/* for data output. */
121 	struct list_head list_node;	/* for rpipe->req_list */
122 	struct wa_xfer *xfer;		/* out xfer */
123 	u8 index;			/* which segment we are */
124 	int isoc_frame_count;	/* number of isoc frames in this segment. */
125 	int isoc_frame_offset;	/* starting frame offset in the xfer URB. */
126 	/* Isoc frame that the current transfer buffer corresponds to. */
127 	int isoc_frame_index;
128 	int isoc_size;	/* size of all isoc frames sent by this seg. */
129 	enum wa_seg_status status;
130 	ssize_t result;			/* bytes xfered or error */
131 	struct wa_xfer_hdr xfer_hdr;
132 };
133 
134 static inline void wa_seg_init(struct wa_seg *seg)
135 {
136 	usb_init_urb(&seg->tr_urb);
137 
138 	/* set the remaining memory to 0. */
139 	memset(((void *)seg) + sizeof(seg->tr_urb), 0,
140 		sizeof(*seg) - sizeof(seg->tr_urb));
141 }
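/*
 * Note: the memset() above relies on tr_urb being the first member of
 * struct wa_seg; everything after the embedded URB is zeroed, while the
 * URB itself is left as usb_init_urb() set it up.
 */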
142 
143 /*
144  * Protected by xfer->lock
145  *
146  */
147 struct wa_xfer {
148 	struct kref refcnt;
149 	struct list_head list_node;
150 	spinlock_t lock;
151 	u32 id;
152 
153 	struct wahc *wa;		/* Wire adapter we are plugged to */
154 	struct usb_host_endpoint *ep;
155 	struct urb *urb;		/* URB we are transferring for */
156 	struct wa_seg **seg;		/* transfer segments */
157 	u8 segs, segs_submitted, segs_done;
158 	unsigned is_inbound:1;
159 	unsigned is_dma:1;
160 	size_t seg_size;
161 	int result;
162 
163 	gfp_t gfp;			/* allocation mask */
164 
165 	struct wusb_dev *wusb_dev;	/* for activity timestamps */
166 };
167 
168 static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
169 	struct wa_seg *seg, int curr_iso_frame);
170 static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
171 		int starting_index, enum wa_seg_status status);
172 
173 static inline void wa_xfer_init(struct wa_xfer *xfer)
174 {
175 	kref_init(&xfer->refcnt);
176 	INIT_LIST_HEAD(&xfer->list_node);
177 	spin_lock_init(&xfer->lock);
178 }
179 
180 /*
181  * Destroy a transfer structure
182  *
183  * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
184  * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
185  */
186 static void wa_xfer_destroy(struct kref *_xfer)
187 {
188 	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
189 	if (xfer->seg) {
190 		unsigned cnt;
191 		for (cnt = 0; cnt < xfer->segs; cnt++) {
192 			struct wa_seg *seg = xfer->seg[cnt];
193 			if (seg) {
194 				usb_free_urb(seg->isoc_pack_desc_urb);
195 				if (seg->dto_urb) {
196 					kfree(seg->dto_urb->sg);
197 					usb_free_urb(seg->dto_urb);
198 				}
199 				usb_free_urb(&seg->tr_urb);
200 			}
201 		}
202 		kfree(xfer->seg);
203 	}
204 	kfree(xfer);
205 }
206 
207 static void wa_xfer_get(struct wa_xfer *xfer)
208 {
209 	kref_get(&xfer->refcnt);
210 }
211 
212 static void wa_xfer_put(struct wa_xfer *xfer)
213 {
214 	kref_put(&xfer->refcnt, wa_xfer_destroy);
215 }
216 
217 /*
218  * Try to get exclusive access to the DTO endpoint resource.  Return true
219  * if successful.
220  */
221 static inline int __wa_dto_try_get(struct wahc *wa)
222 {
223 	return (test_and_set_bit(0, &wa->dto_in_use) == 0);
224 }
225 
226 /* Release the DTO endpoint resource. */
227 static inline void __wa_dto_put(struct wahc *wa)
228 {
229 	clear_bit_unlock(0, &wa->dto_in_use);
230 }
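/*
 * Sketch of the DTO usage pattern followed in this file (illustrative
 * only; see __wa_xfer_submit() and __wa_xfer_delayed_run() for the real
 * thing):
 *
 *	if (__wa_dto_try_get(wa)) {
 *		... submit this RPIPE's segment URBs over DTO ...
 *		__wa_dto_put(wa);
 *		wa_check_for_delayed_rpipes(wa);
 *	} else {
 *		wa_add_delayed_rpipe(wa, rpipe);
 *	}
 *
 * i.e. whoever releases the DTO endpoint also checks whether another
 * RPIPE queued up waiting for it.
 */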
231 
232 /* Service RPIPEs that are waiting on the DTO resource. */
233 static void wa_check_for_delayed_rpipes(struct wahc *wa)
234 {
235 	unsigned long flags;
236 	int dto_waiting = 0;
237 	struct wa_rpipe *rpipe;
238 
239 	spin_lock_irqsave(&wa->rpipe_lock, flags);
240 	while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
241 		rpipe = list_first_entry(&wa->rpipe_delayed_list,
242 				struct wa_rpipe, list_node);
243 		__wa_xfer_delayed_run(rpipe, &dto_waiting);
244 		/* remove this RPIPE from the list if it is not waiting. */
245 		if (!dto_waiting) {
246 			pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
247 				__func__,
248 				le16_to_cpu(rpipe->descr.wRPipeIndex));
249 			list_del_init(&rpipe->list_node);
250 		}
251 	}
252 	spin_unlock_irqrestore(&wa->rpipe_lock, flags);
253 }
254 
255 /* add this RPIPE to the end of the delayed RPIPE list. */
256 static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
257 {
258 	unsigned long flags;
259 
260 	spin_lock_irqsave(&wa->rpipe_lock, flags);
261 	/* add rpipe to the list if it is not already on it. */
262 	if (list_empty(&rpipe->list_node)) {
263 		pr_debug("%s: adding RPIPE %d to the delayed list.\n",
264 			__func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
265 		list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
266 	}
267 	spin_unlock_irqrestore(&wa->rpipe_lock, flags);
268 }
269 
270 /*
271  * xfer is referenced
272  *
273  * xfer->lock has to be unlocked
274  *
275  * We take xfer->lock for setting the result; this is a barrier
276  * against drivers/usb/core/hcd.c:unlink1() being called after we call
277  * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
278  * reference to the transfer.
279  */
280 static void wa_xfer_giveback(struct wa_xfer *xfer)
281 {
282 	unsigned long flags;
283 
284 	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
285 	list_del_init(&xfer->list_node);
286 	usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);
287 	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
288 	/* FIXME: segmentation broken -- kills DWA */
289 	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
290 	wa_put(xfer->wa);
291 	wa_xfer_put(xfer);
292 }
293 
294 /*
295  * xfer is referenced
296  *
297  * xfer->lock has to be unlocked
298  */
299 static void wa_xfer_completion(struct wa_xfer *xfer)
300 {
301 	if (xfer->wusb_dev)
302 		wusb_dev_put(xfer->wusb_dev);
303 	rpipe_put(xfer->ep->hcpriv);
304 	wa_xfer_giveback(xfer);
305 }
306 
307 /*
308  * Initialize a transfer's ID
309  *
310  * We need to use a sequential number; if we use the pointer or the
311  * hash of the pointer, it can repeat over sequential transfers and
312  * then it will confuse the HWA....wonder why in hell they put a 32
313  * bit handle in there then.
314  */
315 static void wa_xfer_id_init(struct wa_xfer *xfer)
316 {
317 	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
318 }
319 
320 /* Return the xfer's ID. */
321 static inline u32 wa_xfer_id(struct wa_xfer *xfer)
322 {
323 	return xfer->id;
324 }
325 
326 /* Return the xfer's ID in transport format (little endian). */
327 static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
328 {
329 	return cpu_to_le32(xfer->id);
330 }
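/*
 * This ID travels on the wire in the dwTransferID field of both the
 * transfer request header (__wa_xfer_setup_hdr0()) and the abort
 * request (__wa_xfer_abort()); incoming transfer results are matched
 * back to their xfer with wa_xfer_get_by_id().
 */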
331 
332 /*
333  * If transfer is done, wrap it up and return true
334  *
335  * xfer->lock has to be locked
336  */
337 static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
338 {
339 	struct device *dev = &xfer->wa->usb_iface->dev;
340 	unsigned result, cnt;
341 	struct wa_seg *seg;
342 	struct urb *urb = xfer->urb;
343 	unsigned found_short = 0;
344 
345 	result = xfer->segs_done == xfer->segs_submitted;
346 	if (result == 0)
347 		goto out;
348 	urb->actual_length = 0;
349 	for (cnt = 0; cnt < xfer->segs; cnt++) {
350 		seg = xfer->seg[cnt];
351 		switch (seg->status) {
352 		case WA_SEG_DONE:
353 			if (found_short && seg->result > 0) {
354 				dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
355 					xfer, wa_xfer_id(xfer), cnt,
356 					seg->result);
357 				urb->status = -EINVAL;
358 				goto out;
359 			}
360 			urb->actual_length += seg->result;
361 			if (!(usb_pipeisoc(xfer->urb->pipe))
362 				&& seg->result < xfer->seg_size
363 			    && cnt != xfer->segs-1)
364 				found_short = 1;
365 			dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
366 				"result %zu urb->actual_length %d\n",
367 				xfer, wa_xfer_id(xfer), seg->index, found_short,
368 				seg->result, urb->actual_length);
369 			break;
370 		case WA_SEG_ERROR:
371 			xfer->result = seg->result;
372 			dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
373 				xfer, wa_xfer_id(xfer), seg->index, seg->result,
374 				seg->result);
375 			goto out;
376 		case WA_SEG_ABORTED:
377 			xfer->result = seg->result;
378 			dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
379 				xfer, wa_xfer_id(xfer), seg->index, seg->result,
380 				seg->result);
381 			goto out;
382 		default:
383 			dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
384 				 xfer, wa_xfer_id(xfer), cnt, seg->status);
385 			xfer->result = -EINVAL;
386 			goto out;
387 		}
388 	}
389 	xfer->result = 0;
390 out:
391 	return result;
392 }
393 
394 /*
395  * Mark the given segment as done.  Return true if this completes the xfer.
396  * This should only be called for segs that have been submitted to an RPIPE.
397  * Delayed segs are not marked as submitted so they do not need to be marked
398  * as done when cleaning up.
399  *
400  * xfer->lock has to be locked
401  */
402 static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
403 	struct wa_seg *seg, enum wa_seg_status status)
404 {
405 	seg->status = status;
406 	xfer->segs_done++;
407 
408 	/* check for done. */
409 	return __wa_xfer_is_done(xfer);
410 }
411 
412 /*
413  * Search the wire adapter's transfer list for a transfer with the
414  * given ID and take a reference on it if found.
415  *
416  * IDs are sequential (see wa_xfer_id_init()), not pointer hashes.
417  *
418  * @returns NULL if not found.
419  */
420 static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
421 {
422 	unsigned long flags;
423 	struct wa_xfer *xfer_itr;
424 	spin_lock_irqsave(&wa->xfer_list_lock, flags);
425 	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
426 		if (id == xfer_itr->id) {
427 			wa_xfer_get(xfer_itr);
428 			goto out;
429 		}
430 	}
431 	xfer_itr = NULL;
432 out:
433 	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
434 	return xfer_itr;
435 }
436 
437 struct wa_xfer_abort_buffer {
438 	struct urb urb;
439 	struct wahc *wa;
440 	struct wa_xfer_abort cmd;
441 };
442 
443 static void __wa_xfer_abort_cb(struct urb *urb)
444 {
445 	struct wa_xfer_abort_buffer *b = urb->context;
446 	struct wahc *wa = b->wa;
447 
448 	/*
449 	 * If the abort request URB failed, then the HWA did not get the abort
450 	 * command.  Forcibly clean up the xfer without waiting for a Transfer
451 	 * Result from the HWA.
452 	 */
453 	if (urb->status < 0) {
454 		struct wa_xfer *xfer;
455 		struct device *dev = &wa->usb_iface->dev;
456 
457 		xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
458 		dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
459 			__func__, urb->status);
460 		if (xfer) {
461 			unsigned long flags;
462 			int done, seg_index = 0;
463 			struct wa_rpipe *rpipe = xfer->ep->hcpriv;
464 
465 			dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
466 				__func__, xfer, wa_xfer_id(xfer));
467 			spin_lock_irqsave(&xfer->lock, flags);
468 			/* skip done segs. */
469 			while (seg_index < xfer->segs) {
470 				struct wa_seg *seg = xfer->seg[seg_index];
471 
472 				if ((seg->status == WA_SEG_DONE) ||
473 					(seg->status == WA_SEG_ERROR)) {
474 					++seg_index;
475 				} else {
476 					break;
477 				}
478 			}
479 			/* mark remaining segs as aborted. */
480 			wa_complete_remaining_xfer_segs(xfer, seg_index,
481 				WA_SEG_ABORTED);
482 			done = __wa_xfer_is_done(xfer);
483 			spin_unlock_irqrestore(&xfer->lock, flags);
484 			if (done)
485 				wa_xfer_completion(xfer);
486 			wa_xfer_delayed_run(rpipe);
487 			wa_xfer_put(xfer);
488 		} else {
489 			dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
490 				 __func__, le32_to_cpu(b->cmd.dwTransferID));
491 		}
492 	}
493 
494 	wa_put(wa);	/* taken in __wa_xfer_abort */
495 	usb_put_urb(&b->urb);
496 }
497 
498 /*
499  * Aborts an ongoing transaction
500  *
501  * Assumes the transfer is referenced and locked and in a submitted
502  * state (mainly that there is an endpoint/rpipe assigned).
503  *
504  * The callback (see above) does nothing more than free the request
505  * data by putting the URB; because the URB is allocated at the head
506  * of the struct, the whole space we allocated is kfreed.
507  */
508 static int __wa_xfer_abort(struct wa_xfer *xfer)
509 {
510 	int result = -ENOMEM;
511 	struct device *dev = &xfer->wa->usb_iface->dev;
512 	struct wa_xfer_abort_buffer *b;
513 	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
514 
515 	b = kmalloc(sizeof(*b), GFP_ATOMIC);
516 	if (b == NULL)
517 		goto error_kmalloc;
518 	b->cmd.bLength =  sizeof(b->cmd);
519 	b->cmd.bRequestType = WA_XFER_ABORT;
520 	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
521 	b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
522 	b->wa = wa_get(xfer->wa);
523 
524 	usb_init_urb(&b->urb);
525 	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
526 		usb_sndbulkpipe(xfer->wa->usb_dev,
527 				xfer->wa->dto_epd->bEndpointAddress),
528 		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
529 	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
530 	if (result < 0)
531 		goto error_submit;
532 	return result;				/* callback frees! */
533 
534 
535 error_submit:
536 	wa_put(xfer->wa);
537 	if (printk_ratelimit())
538 		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
539 			xfer, result);
540 	kfree(b);
541 error_kmalloc:
542 	return result;
543 
544 }
545 
546 /*
547  * Calculate the number of isoc frames starting from isoc_frame_offset
548  * that will fit in a transfer segment.
549  */
550 static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
551 	int isoc_frame_offset, int *total_size)
552 {
553 	int segment_size = 0, frame_count = 0;
554 	int index = isoc_frame_offset;
555 	struct usb_iso_packet_descriptor *iso_frame_desc =
556 		xfer->urb->iso_frame_desc;
557 
558 	while ((index < xfer->urb->number_of_packets)
559 		&& ((segment_size + iso_frame_desc[index].length)
560 				<= xfer->seg_size)) {
561 		/*
562 		 * For Alereon HWA devices, only include an isoc frame in an
563 		 * out segment if it is physically contiguous with the previous
564 		 * frame.  This is required because those devices expect
565 		 * the isoc frames to be sent as a single USB transaction as
566 		 * opposed to one transaction per frame with standard HWA.
567 		 */
568 		if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
569 			&& (xfer->is_inbound == 0)
570 			&& (index > isoc_frame_offset)
571 			&& ((iso_frame_desc[index - 1].offset +
572 				iso_frame_desc[index - 1].length) !=
573 				iso_frame_desc[index].offset))
574 			break;
575 
576 		/* this frame fits. count it. */
577 		++frame_count;
578 		segment_size += iso_frame_desc[index].length;
579 
580 		/* move to the next isoc frame. */
581 		++index;
582 	}
583 
584 	*total_size = segment_size;
585 	return frame_count;
586 }
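/*
 * Worked example (made-up numbers): with xfer->seg_size = 3072 and
 * four pending isoc frames of 1024 bytes each starting at
 * isoc_frame_offset, the first three fit (3072 bytes), so the function
 * returns 3 with *total_size = 3072.  On an Alereon HWA OUT transfer
 * the count also stops at the first frame whose buffer is not
 * contiguous with the previous one, since those frames are sent as a
 * single USB transaction.
 */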
587 
588 /*
589  * Compute the xfer type, segment size and segment count for a transfer.
590  * @returns < 0 on error, transfer segment request size if ok
591  */
592 static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
593 				     enum wa_xfer_type *pxfer_type)
594 {
595 	ssize_t result;
596 	struct device *dev = &xfer->wa->usb_iface->dev;
597 	size_t maxpktsize;
598 	struct urb *urb = xfer->urb;
599 	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
600 
601 	switch (rpipe->descr.bmAttribute & 0x3) {
602 	case USB_ENDPOINT_XFER_CONTROL:
603 		*pxfer_type = WA_XFER_TYPE_CTL;
604 		result = sizeof(struct wa_xfer_ctl);
605 		break;
606 	case USB_ENDPOINT_XFER_INT:
607 	case USB_ENDPOINT_XFER_BULK:
608 		*pxfer_type = WA_XFER_TYPE_BI;
609 		result = sizeof(struct wa_xfer_bi);
610 		break;
611 	case USB_ENDPOINT_XFER_ISOC:
612 		*pxfer_type = WA_XFER_TYPE_ISO;
613 		result = sizeof(struct wa_xfer_hwaiso);
614 		break;
615 	default:
616 		/* never happens */
617 		BUG();
618 		result = -EINVAL;	/* shut gcc up */
619 	}
620 	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
621 	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
622 
623 	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
624 	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
625 		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
626 	/* Compute the segment size and make sure it is a multiple of
627 	 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
628 	 * a check (FIXME) */
629 	if (xfer->seg_size < maxpktsize) {
630 		dev_err(dev,
631 			"HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
632 			xfer->seg_size, maxpktsize);
633 		result = -EINVAL;
634 		goto error;
635 	}
636 	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
637 	if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
638 		int index = 0;
639 
640 		xfer->segs = 0;
641 		/*
642 		 * loop over urb->number_of_packets to determine how many
643 		 * xfer segments will be needed to send the isoc frames.
644 		 */
645 		while (index < urb->number_of_packets) {
646 			int seg_size; /* don't care. */
647 			index += __wa_seg_calculate_isoc_frame_count(xfer,
648 					index, &seg_size);
649 			++xfer->segs;
650 		}
651 	} else {
652 		xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
653 						xfer->seg_size);
654 		if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
655 			xfer->segs = 1;
656 	}
657 
658 	if (xfer->segs > WA_SEGS_MAX) {
659 		dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
660 			xfer->segs,
661 			WA_SEGS_MAX);
662 		result = -EINVAL;
663 		goto error;
664 	}
665 error:
666 	return result;
667 }
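/*
 * Worked example of the sizing above (made-up descriptor values):
 * with wBlocks = 8, bRPipeBlockSize = 10 and wMaxPacketSize = 3584:
 *
 *	seg_size = 8 * (1 << (10 - 1)) = 4096
 *	seg_size = (4096 / 3584) * 3584 = 3584	(multiple of maxpktsize)
 *
 * so a 14000 byte bulk URB needs DIV_ROUND_UP(14000, 3584) = 4
 * segments.
 */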
668 
669 static void __wa_setup_isoc_packet_descr(
670 		struct wa_xfer_packet_info_hwaiso *packet_desc,
671 		struct wa_xfer *xfer,
672 		struct wa_seg *seg) {
673 	struct usb_iso_packet_descriptor *iso_frame_desc =
674 		xfer->urb->iso_frame_desc;
675 	int frame_index;
676 
677 	/* populate isoc packet descriptor. */
678 	packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
679 	packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
680 		(sizeof(packet_desc->PacketLength[0]) *
681 			seg->isoc_frame_count));
682 	for (frame_index = 0; frame_index < seg->isoc_frame_count;
683 		++frame_index) {
684 		int offset_index = frame_index + seg->isoc_frame_offset;
685 		packet_desc->PacketLength[frame_index] =
686 			cpu_to_le16(iso_frame_desc[offset_index].length);
687 	}
688 }
689 
690 
691 /* Fill in the common request header and xfer-type specific data. */
692 static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
693 				 struct wa_xfer_hdr *xfer_hdr0,
694 				 enum wa_xfer_type xfer_type,
695 				 size_t xfer_hdr_size)
696 {
697 	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
698 	struct wa_seg *seg = xfer->seg[0];
699 
700 	xfer_hdr0 = &seg->xfer_hdr;
701 	xfer_hdr0->bLength = xfer_hdr_size;
702 	xfer_hdr0->bRequestType = xfer_type;
703 	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
704 	xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
705 	xfer_hdr0->bTransferSegment = 0;
706 	switch (xfer_type) {
707 	case WA_XFER_TYPE_CTL: {
708 		struct wa_xfer_ctl *xfer_ctl =
709 			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
710 		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
711 		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
712 		       sizeof(xfer_ctl->baSetupData));
713 		break;
714 	}
715 	case WA_XFER_TYPE_BI:
716 		break;
717 	case WA_XFER_TYPE_ISO: {
718 		struct wa_xfer_hwaiso *xfer_iso =
719 			container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
720 		struct wa_xfer_packet_info_hwaiso *packet_desc =
721 			((void *)xfer_iso) + xfer_hdr_size;
722 
723 		/* populate the isoc section of the transfer request. */
724 		xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
725 		/* populate isoc packet descriptor. */
726 		__wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
727 		break;
728 	}
729 	default:
730 		BUG();
731 	}
732 }
733 
734 /*
735  * Callback for the OUT data phase of the segment request
736  *
737  * Check wa_seg_tr_cb(); most comments also apply here because this
738  * function does almost the same thing and they work closely
739  * together.
740  *
741  * If the seg request has failed but this DTO phase has succeeded,
742  * wa_seg_tr_cb() has already failed the segment and moved the
743  * status to WA_SEG_ERROR, so this will go through 'case 0' and
744  * effectively do nothing.
745  */
746 static void wa_seg_dto_cb(struct urb *urb)
747 {
748 	struct wa_seg *seg = urb->context;
749 	struct wa_xfer *xfer = seg->xfer;
750 	struct wahc *wa;
751 	struct device *dev;
752 	struct wa_rpipe *rpipe;
753 	unsigned long flags;
754 	unsigned rpipe_ready = 0;
755 	int data_send_done = 1, release_dto = 0, holding_dto = 0;
756 	u8 done = 0;
757 	int result;
758 
759 	/* free the sg if it was used. */
760 	kfree(urb->sg);
761 	urb->sg = NULL;
762 
763 	spin_lock_irqsave(&xfer->lock, flags);
764 	wa = xfer->wa;
765 	dev = &wa->usb_iface->dev;
766 	if (usb_pipeisoc(xfer->urb->pipe)) {
767 		/* Alereon HWA sends all isoc frames in a single transfer. */
768 		if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
769 			seg->isoc_frame_index += seg->isoc_frame_count;
770 		else
771 			seg->isoc_frame_index += 1;
772 		if (seg->isoc_frame_index < seg->isoc_frame_count) {
773 			data_send_done = 0;
774 			holding_dto = 1; /* checked in error cases. */
775 			/*
776 			 * if this is the last isoc frame of the segment, we
777 			 * can release DTO after sending this frame.
778 			 */
779 			if ((seg->isoc_frame_index + 1) >=
780 				seg->isoc_frame_count)
781 				release_dto = 1;
782 		}
783 		dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
784 			wa_xfer_id(xfer), seg->index, seg->isoc_frame_index,
785 			holding_dto, release_dto);
786 	}
787 	spin_unlock_irqrestore(&xfer->lock, flags);
788 
789 	switch (urb->status) {
790 	case 0:
791 		spin_lock_irqsave(&xfer->lock, flags);
792 		seg->result += urb->actual_length;
793 		if (data_send_done) {
794 			dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
795 				wa_xfer_id(xfer), seg->index, seg->result);
796 			if (seg->status < WA_SEG_PENDING)
797 				seg->status = WA_SEG_PENDING;
798 		} else {
799 			/* should only hit this for isoc xfers. */
800 			/*
801 			 * Populate the dto URB with the next isoc frame buffer,
802 			 * send the URB and release DTO if we no longer need it.
803 			 */
804 			 __wa_populate_dto_urb_isoc(xfer, seg,
805 				seg->isoc_frame_offset + seg->isoc_frame_index);
806 
807 			/* resubmit the URB with the next isoc frame. */
808 			/* take a ref on resubmit. */
809 			wa_xfer_get(xfer);
810 			result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
811 			if (result < 0) {
812 				dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
813 				       wa_xfer_id(xfer), seg->index, result);
814 				spin_unlock_irqrestore(&xfer->lock, flags);
815 				goto error_dto_submit;
816 			}
817 		}
818 		spin_unlock_irqrestore(&xfer->lock, flags);
819 		if (release_dto) {
820 			__wa_dto_put(wa);
821 			wa_check_for_delayed_rpipes(wa);
822 		}
823 		break;
824 	case -ECONNRESET:	/* URB unlinked; no need to do anything */
825 	case -ENOENT:		/* as it was done by whoever unlinked us */
826 		if (holding_dto) {
827 			__wa_dto_put(wa);
828 			wa_check_for_delayed_rpipes(wa);
829 		}
830 		break;
831 	default:		/* Other errors ... */
832 		dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
833 			wa_xfer_id(xfer), seg->index, urb->status);
834 		goto error_default;
835 	}
836 
837 	/* taken when this URB was submitted. */
838 	wa_xfer_put(xfer);
839 	return;
840 
841 error_dto_submit:
842 	/* taken on resubmit attempt. */
843 	wa_xfer_put(xfer);
844 error_default:
845 	spin_lock_irqsave(&xfer->lock, flags);
846 	rpipe = xfer->ep->hcpriv;
847 	if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
848 		    EDC_ERROR_TIMEFRAME)){
849 		dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
850 		wa_reset_all(wa);
851 	}
852 	if (seg->status != WA_SEG_ERROR) {
853 		seg->result = urb->status;
854 		__wa_xfer_abort(xfer);
855 		rpipe_ready = rpipe_avail_inc(rpipe);
856 		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
857 	}
858 	spin_unlock_irqrestore(&xfer->lock, flags);
859 	if (holding_dto) {
860 		__wa_dto_put(wa);
861 		wa_check_for_delayed_rpipes(wa);
862 	}
863 	if (done)
864 		wa_xfer_completion(xfer);
865 	if (rpipe_ready)
866 		wa_xfer_delayed_run(rpipe);
867 	/* taken when this URB was submitted. */
868 	wa_xfer_put(xfer);
869 }
870 
871 /*
872  * Callback for the isoc packet descriptor phase of the segment request
873  *
874  * Check wa_seg_tr_cb(); most comments also apply here because this
875  * function does almost the same thing and they work closely
876  * together.
877  *
878  * If the seg request has failed but this phase has succeeded,
879  * wa_seg_tr_cb() has already failed the segment and moved the
880  * status to WA_SEG_ERROR, so this will go through 'case 0' and
881  * effectively do nothing.
882  */
883 static void wa_seg_iso_pack_desc_cb(struct urb *urb)
884 {
885 	struct wa_seg *seg = urb->context;
886 	struct wa_xfer *xfer = seg->xfer;
887 	struct wahc *wa;
888 	struct device *dev;
889 	struct wa_rpipe *rpipe;
890 	unsigned long flags;
891 	unsigned rpipe_ready = 0;
892 	u8 done = 0;
893 
894 	switch (urb->status) {
895 	case 0:
896 		spin_lock_irqsave(&xfer->lock, flags);
897 		wa = xfer->wa;
898 		dev = &wa->usb_iface->dev;
899 		dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
900 			wa_xfer_id(xfer), seg->index);
901 		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
902 			seg->status = WA_SEG_PENDING;
903 		spin_unlock_irqrestore(&xfer->lock, flags);
904 		break;
905 	case -ECONNRESET:	/* URB unlinked; no need to do anything */
906 	case -ENOENT:		/* as it was done by whoever unlinked us */
907 		break;
908 	default:		/* Other errors ... */
909 		spin_lock_irqsave(&xfer->lock, flags);
910 		wa = xfer->wa;
911 		dev = &wa->usb_iface->dev;
912 		rpipe = xfer->ep->hcpriv;
913 		pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
914 				wa_xfer_id(xfer), seg->index, urb->status);
915 		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
916 			    EDC_ERROR_TIMEFRAME)){
917 			dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
918 			wa_reset_all(wa);
919 		}
920 		if (seg->status != WA_SEG_ERROR) {
921 			usb_unlink_urb(seg->dto_urb);
922 			seg->result = urb->status;
923 			__wa_xfer_abort(xfer);
924 			rpipe_ready = rpipe_avail_inc(rpipe);
925 			done = __wa_xfer_mark_seg_as_done(xfer, seg,
926 					WA_SEG_ERROR);
927 		}
928 		spin_unlock_irqrestore(&xfer->lock, flags);
929 		if (done)
930 			wa_xfer_completion(xfer);
931 		if (rpipe_ready)
932 			wa_xfer_delayed_run(rpipe);
933 	}
934 	/* taken when this URB was submitted. */
935 	wa_xfer_put(xfer);
936 }
937 
938 /*
939  * Callback for the segment request
940  *
941  * If successful transition state (unless already transitioned or
942  * outbound transfer); otherwise, take a note of the error, mark this
943  * segment done and try completion.
944  *
945  * Note we don't access the xfer until we are sure that the transfer
946  * hasn't been cancelled (ECONNRESET, ENOENT), as in that case
947  * seg->xfer could be gone already.
948  *
949  * We have to check before setting the status to WA_SEG_PENDING
950  * because sometimes the xfer result callback arrives before this
951  * callback (geeeeeeze), so it might happen that we are already in
952  * another state. As well, we don't set it if the transfer is not inbound,
953  * as in that case, wa_seg_dto_cb will do it when the OUT data phase
954  * finishes.
955  */
956 static void wa_seg_tr_cb(struct urb *urb)
957 {
958 	struct wa_seg *seg = urb->context;
959 	struct wa_xfer *xfer = seg->xfer;
960 	struct wahc *wa;
961 	struct device *dev;
962 	struct wa_rpipe *rpipe;
963 	unsigned long flags;
964 	unsigned rpipe_ready;
965 	u8 done = 0;
966 
967 	switch (urb->status) {
968 	case 0:
969 		spin_lock_irqsave(&xfer->lock, flags);
970 		wa = xfer->wa;
971 		dev = &wa->usb_iface->dev;
972 		dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
973 			xfer, wa_xfer_id(xfer), seg->index);
974 		if (xfer->is_inbound &&
975 			seg->status < WA_SEG_PENDING &&
976 			!(usb_pipeisoc(xfer->urb->pipe)))
977 			seg->status = WA_SEG_PENDING;
978 		spin_unlock_irqrestore(&xfer->lock, flags);
979 		break;
980 	case -ECONNRESET:	/* URB unlinked; no need to do anything */
981 	case -ENOENT:		/* as it was done by whoever unlinked us */
982 		break;
983 	default:		/* Other errors ... */
984 		spin_lock_irqsave(&xfer->lock, flags);
985 		wa = xfer->wa;
986 		dev = &wa->usb_iface->dev;
987 		rpipe = xfer->ep->hcpriv;
988 		if (printk_ratelimit())
989 			dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
990 				xfer, wa_xfer_id(xfer), seg->index,
991 				urb->status);
992 		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
993 			    EDC_ERROR_TIMEFRAME)){
994 			dev_err(dev, "DTO: URB max acceptable errors "
995 				"exceeded, resetting device\n");
996 			wa_reset_all(wa);
997 		}
998 		usb_unlink_urb(seg->isoc_pack_desc_urb);
999 		usb_unlink_urb(seg->dto_urb);
1000 		seg->result = urb->status;
1001 		__wa_xfer_abort(xfer);
1002 		rpipe_ready = rpipe_avail_inc(rpipe);
1003 		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
1004 		spin_unlock_irqrestore(&xfer->lock, flags);
1005 		if (done)
1006 			wa_xfer_completion(xfer);
1007 		if (rpipe_ready)
1008 			wa_xfer_delayed_run(rpipe);
1009 	}
1010 	/* taken when this URB was submitted. */
1011 	wa_xfer_put(xfer);
1012 }
1013 
1014 /*
1015  * Allocate an SG list to store bytes_to_transfer bytes and copy the
1016  * subset of the in_sg that matches the buffer subset
1017  * we are about to transfer.
1018  */
1019 static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
1020 	const unsigned int bytes_transferred,
1021 	const unsigned int bytes_to_transfer, int *out_num_sgs)
1022 {
1023 	struct scatterlist *out_sg;
1024 	unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
1025 		nents;
1026 	struct scatterlist *current_xfer_sg = in_sg;
1027 	struct scatterlist *current_seg_sg, *last_seg_sg;
1028 
1029 	/* skip previously transferred pages. */
1030 	while ((current_xfer_sg) &&
1031 			(bytes_processed < bytes_transferred)) {
1032 		bytes_processed += current_xfer_sg->length;
1033 
1034 		/* advance the sg if the bytes transferred so far completely
1035 			cover this sg entry. */
1036 		if (bytes_processed <= bytes_transferred)
1037 			current_xfer_sg = sg_next(current_xfer_sg);
1038 	}
1039 
1040 	/* the data for the current segment starts in current_xfer_sg.
1041 		calculate the offset. */
1042 	if (bytes_processed > bytes_transferred) {
1043 		offset_into_current_page_data = current_xfer_sg->length -
1044 			(bytes_processed - bytes_transferred);
1045 	}
1046 
1047 	/* calculate the number of pages needed by this segment. */
1048 	nents = DIV_ROUND_UP((bytes_to_transfer +
1049 		offset_into_current_page_data +
1050 		current_xfer_sg->offset),
1051 		PAGE_SIZE);
1052 
1053 	out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
1054 	if (out_sg) {
1055 		sg_init_table(out_sg, nents);
1056 
1057 		/* copy the portion of the incoming SG that correlates to the
1058 		 * data to be transferred by this segment to the segment SG. */
1059 		last_seg_sg = current_seg_sg = out_sg;
1060 		bytes_processed = 0;
1061 
1062 		/* reset nents and calculate the actual number of sg entries
1063 			needed. */
1064 		nents = 0;
1065 		while ((bytes_processed < bytes_to_transfer) &&
1066 				current_seg_sg && current_xfer_sg) {
1067 			unsigned int page_len = min((current_xfer_sg->length -
1068 				offset_into_current_page_data),
1069 				(bytes_to_transfer - bytes_processed));
1070 
1071 			sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
1072 				page_len,
1073 				current_xfer_sg->offset +
1074 				offset_into_current_page_data);
1075 
1076 			bytes_processed += page_len;
1077 
1078 			last_seg_sg = current_seg_sg;
1079 			current_seg_sg = sg_next(current_seg_sg);
1080 			current_xfer_sg = sg_next(current_xfer_sg);
1081 
1082 			/* only the first page may require additional offset. */
1083 			offset_into_current_page_data = 0;
1084 			nents++;
1085 		}
1086 
1087 		/* update num_sgs and terminate the list since we may have
1088 		 *  concatenated pages. */
1089 		sg_mark_end(last_seg_sg);
1090 		*out_num_sgs = nents;
1091 	}
1092 
1093 	return out_sg;
1094 }
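/*
 * Worked example (made-up numbers): suppose the URB's SG list has three
 * 4096 byte entries and bytes_transferred = 6000.  The skip loop above
 * stops inside the second entry with
 * offset_into_current_page_data = 4096 - (8192 - 6000) = 1904, so a
 * request for bytes_to_transfer = 3000 yields a two entry subset SG:
 * 2192 bytes from the tail of the second entry plus 808 bytes from the
 * start of the third.
 */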
1095 
1096 /*
1097  * Populate DMA buffer info for the isoc dto urb.
1098  */
1099 static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
1100 	struct wa_seg *seg, int curr_iso_frame)
1101 {
1102 	seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1103 	seg->dto_urb->sg = NULL;
1104 	seg->dto_urb->num_sgs = 0;
1105 	/* dto urb buffer address pulled from iso_frame_desc. */
1106 	seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
1107 		xfer->urb->iso_frame_desc[curr_iso_frame].offset;
1108 	/* The Alereon HWA sends a single URB with all isoc segs. */
1109 	if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
1110 		seg->dto_urb->transfer_buffer_length = seg->isoc_size;
1111 	else
1112 		seg->dto_urb->transfer_buffer_length =
1113 			xfer->urb->iso_frame_desc[curr_iso_frame].length;
1114 }
1115 
1116 /*
1117  * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
1118  */
1119 static int __wa_populate_dto_urb(struct wa_xfer *xfer,
1120 	struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
1121 {
1122 	int result = 0;
1123 
1124 	if (xfer->is_dma) {
1125 		seg->dto_urb->transfer_dma =
1126 			xfer->urb->transfer_dma + buf_itr_offset;
1127 		seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1128 		seg->dto_urb->sg = NULL;
1129 		seg->dto_urb->num_sgs = 0;
1130 	} else {
1131 		/* do buffer or SG processing. */
1132 		seg->dto_urb->transfer_flags &=
1133 			~URB_NO_TRANSFER_DMA_MAP;
1134 		/* this should always be 0 before a resubmit. */
1135 		seg->dto_urb->num_mapped_sgs = 0;
1136 
1137 		if (xfer->urb->transfer_buffer) {
1138 			seg->dto_urb->transfer_buffer =
1139 				xfer->urb->transfer_buffer +
1140 				buf_itr_offset;
1141 			seg->dto_urb->sg = NULL;
1142 			seg->dto_urb->num_sgs = 0;
1143 		} else {
1144 			seg->dto_urb->transfer_buffer = NULL;
1145 
1146 			/*
1147 			 * allocate an SG list to store seg_size bytes
1148 			 * and copy the subset of the xfer->urb->sg that
1149 			 * matches the buffer subset we are about to
1150 			 * read.
1151 			 */
1152 			seg->dto_urb->sg = wa_xfer_create_subset_sg(
1153 				xfer->urb->sg,
1154 				buf_itr_offset, buf_itr_size,
1155 				&(seg->dto_urb->num_sgs));
1156 			if (!(seg->dto_urb->sg))
1157 				result = -ENOMEM;
1158 		}
1159 	}
1160 	seg->dto_urb->transfer_buffer_length = buf_itr_size;
1161 
1162 	return result;
1163 }
1164 
1165 /*
1166  * Allocate the segs array and initialize each of them
1167  *
1168  * The segments are freed by wa_xfer_destroy() when the xfer use count
1169  * drops to zero; however, because each segment is given the same life
1170  * cycle as the USB URB it contains, it is actually freed by
1171  * usb_put_urb() on the contained USB URB (twisted, eh?).
1172  */
1173 static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
1174 {
1175 	int result, cnt, isoc_frame_offset = 0;
1176 	size_t alloc_size = sizeof(*xfer->seg[0])
1177 		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
1178 	struct usb_device *usb_dev = xfer->wa->usb_dev;
1179 	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
1180 	struct wa_seg *seg;
1181 	size_t buf_itr, buf_size, buf_itr_size;
1182 
1183 	result = -ENOMEM;
1184 	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
1185 	if (xfer->seg == NULL)
1186 		goto error_segs_kzalloc;
1187 	buf_itr = 0;
1188 	buf_size = xfer->urb->transfer_buffer_length;
1189 	for (cnt = 0; cnt < xfer->segs; cnt++) {
1190 		size_t iso_pkt_descr_size = 0;
1191 		int seg_isoc_frame_count = 0, seg_isoc_size = 0;
1192 
1193 		/*
1194 		 * Adjust the size of the segment object to contain space for
1195 		 * the isoc packet descriptor buffer.
1196 		 */
1197 		if (usb_pipeisoc(xfer->urb->pipe)) {
1198 			seg_isoc_frame_count =
1199 				__wa_seg_calculate_isoc_frame_count(xfer,
1200 					isoc_frame_offset, &seg_isoc_size);
1201 
1202 			iso_pkt_descr_size =
1203 				sizeof(struct wa_xfer_packet_info_hwaiso) +
1204 				(seg_isoc_frame_count * sizeof(__le16));
1205 		}
1206 		seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
1207 						GFP_ATOMIC);
1208 		if (seg == NULL)
1209 			goto error_seg_kmalloc;
1210 		wa_seg_init(seg);
1211 		seg->xfer = xfer;
1212 		seg->index = cnt;
1213 		usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
1214 				  usb_sndbulkpipe(usb_dev,
1215 						  dto_epd->bEndpointAddress),
1216 				  &seg->xfer_hdr, xfer_hdr_size,
1217 				  wa_seg_tr_cb, seg);
1218 		buf_itr_size = min(buf_size, xfer->seg_size);
1219 
1220 		if (usb_pipeisoc(xfer->urb->pipe)) {
1221 			seg->isoc_frame_count = seg_isoc_frame_count;
1222 			seg->isoc_frame_offset = isoc_frame_offset;
1223 			seg->isoc_size = seg_isoc_size;
1224 			/* iso packet descriptor. */
1225 			seg->isoc_pack_desc_urb =
1226 					usb_alloc_urb(0, GFP_ATOMIC);
1227 			if (seg->isoc_pack_desc_urb == NULL)
1228 				goto error_iso_pack_desc_alloc;
1229 			/*
1230 			 * The buffer for the isoc packet descriptor starts
1231 			 * after the transfer request header in the
1232 			 * segment object memory buffer.
1233 			 */
1234 			usb_fill_bulk_urb(
1235 				seg->isoc_pack_desc_urb, usb_dev,
1236 				usb_sndbulkpipe(usb_dev,
1237 					dto_epd->bEndpointAddress),
1238 				(void *)(&seg->xfer_hdr) +
1239 					xfer_hdr_size,
1240 				iso_pkt_descr_size,
1241 				wa_seg_iso_pack_desc_cb, seg);
1242 
1243 			/* adjust starting frame offset for next seg. */
1244 			isoc_frame_offset += seg_isoc_frame_count;
1245 		}
1246 
1247 		if (xfer->is_inbound == 0 && buf_size > 0) {
1248 			/* outbound data. */
1249 			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
1250 			if (seg->dto_urb == NULL)
1251 				goto error_dto_alloc;
1252 			usb_fill_bulk_urb(
1253 				seg->dto_urb, usb_dev,
1254 				usb_sndbulkpipe(usb_dev,
1255 						dto_epd->bEndpointAddress),
1256 				NULL, 0, wa_seg_dto_cb, seg);
1257 
1258 			if (usb_pipeisoc(xfer->urb->pipe)) {
1259 				/*
1260 				 * Fill in the xfer buffer information for the
1261 				 * first isoc frame.  Subsequent frames in this
1262 				 * segment will be filled in and sent from the
1263 				 * DTO completion routine, if needed.
1264 				 */
1265 				__wa_populate_dto_urb_isoc(xfer, seg,
1266 					seg->isoc_frame_offset);
1267 			} else {
1268 				/* fill in the xfer buffer information. */
1269 				result = __wa_populate_dto_urb(xfer, seg,
1270 							buf_itr, buf_itr_size);
1271 				if (result < 0)
1272 					goto error_seg_outbound_populate;
1273 
1274 				buf_itr += buf_itr_size;
1275 				buf_size -= buf_itr_size;
1276 			}
1277 		}
1278 		seg->status = WA_SEG_READY;
1279 	}
1280 	return 0;
1281 
1282 	/*
1283 	 * Free the memory for the current segment which failed to init.
1284 	 * Use the fact that cnt is left at where it failed.  The remaining
1285 	 * segments will be cleaned up by wa_xfer_destroy.
1286 	 */
1287 error_seg_outbound_populate:
1288 	usb_free_urb(xfer->seg[cnt]->dto_urb);
1289 error_dto_alloc:
1290 	usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
1291 error_iso_pack_desc_alloc:
1292 	kfree(xfer->seg[cnt]);
1293 	xfer->seg[cnt] = NULL;
1294 error_seg_kmalloc:
1295 error_segs_kzalloc:
1296 	return result;
1297 }
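/*
 * For reference, the layout of each segment allocated above (the isoc
 * packet descriptor area only exists for isochronous pipes):
 *
 *	+---------------------------+  <- start of kmalloc'ed block
 *	| struct wa_seg             |
 *	|   ...                     |
 *	|   xfer_hdr (last member,  |
 *	|   really xfer_hdr_size    |
 *	|   bytes long)             |
 *	+---------------------------+
 *	| isoc packet descriptor    |  (iso_pkt_descr_size bytes)
 *	+---------------------------+
 *
 * which is why the isoc packet descriptor URB's buffer is set to
 * (void *)&seg->xfer_hdr + xfer_hdr_size.
 */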
1298 
1299 /*
1300  * Allocates all the stuff needed to submit a transfer
1301  *
1302  * Breaks the whole data buffer into a list of segments, each one has a
1303  * structure allocated to it and linked in xfer->seg[index]
1304  *
1305  * FIXME: merge setup_segs() and the last part of this function, no
1306  *        need to do two for loops when we could run everything in a
1307  *        single one
1308  */
1309 static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
1310 {
1311 	int result;
1312 	struct device *dev = &xfer->wa->usb_iface->dev;
1313 	enum wa_xfer_type xfer_type = 0; /* shut up GCC */
1314 	size_t xfer_hdr_size, cnt, transfer_size;
1315 	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
1316 
1317 	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
1318 	if (result < 0)
1319 		goto error_setup_sizes;
1320 	xfer_hdr_size = result;
1321 	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
1322 	if (result < 0) {
1323 		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
1324 			xfer, xfer->segs, result);
1325 		goto error_setup_segs;
1326 	}
1327 	/* Fill the first header */
1328 	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
1329 	wa_xfer_id_init(xfer);
1330 	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
1331 
1332 	/* Fill remaining headers */
1333 	xfer_hdr = xfer_hdr0;
1334 	if (xfer_type == WA_XFER_TYPE_ISO) {
1335 		xfer_hdr0->dwTransferLength =
1336 			cpu_to_le32(xfer->seg[0]->isoc_size);
1337 		for (cnt = 1; cnt < xfer->segs; cnt++) {
1338 			struct wa_xfer_packet_info_hwaiso *packet_desc;
1339 			struct wa_seg *seg = xfer->seg[cnt];
1340 			struct wa_xfer_hwaiso *xfer_iso;
1341 
1342 			xfer_hdr = &seg->xfer_hdr;
1343 			xfer_iso = container_of(xfer_hdr,
1344 						struct wa_xfer_hwaiso, hdr);
1345 			packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
1346 			/*
1347 			 * Copy values from the 0th header. Segment specific
1348 			 * values are set below.
1349 			 */
1350 			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
1351 			xfer_hdr->bTransferSegment = cnt;
1352 			xfer_hdr->dwTransferLength =
1353 				cpu_to_le32(seg->isoc_size);
1354 			xfer_iso->dwNumOfPackets =
1355 					cpu_to_le32(seg->isoc_frame_count);
1356 			__wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
1357 			seg->status = WA_SEG_READY;
1358 		}
1359 	} else {
1360 		transfer_size = urb->transfer_buffer_length;
1361 		xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
1362 			cpu_to_le32(xfer->seg_size) :
1363 			cpu_to_le32(transfer_size);
1364 		transfer_size -=  xfer->seg_size;
1365 		for (cnt = 1; cnt < xfer->segs; cnt++) {
1366 			xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
1367 			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
1368 			xfer_hdr->bTransferSegment = cnt;
1369 			xfer_hdr->dwTransferLength =
1370 				transfer_size > xfer->seg_size ?
1371 					cpu_to_le32(xfer->seg_size)
1372 					: cpu_to_le32(transfer_size);
1373 			xfer->seg[cnt]->status = WA_SEG_READY;
1374 			transfer_size -=  xfer->seg_size;
1375 		}
1376 	}
1377 	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
1378 	result = 0;
1379 error_setup_segs:
1380 error_setup_sizes:
1381 	return result;
1382 }
1383 
1384 /*
1385  * Submit a segment's URBs: the transfer request, the isoc packet
1386  * descriptor (if any) and the data out URB (if any).
1387  * rpipe->seg_lock is held!
1388  */
1389 static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
1390 			   struct wa_seg *seg, int *dto_done)
1391 {
1392 	int result;
1393 
1394 	/* default to done unless we encounter a multi-frame isoc segment. */
1395 	*dto_done = 1;
1396 
1397 	/*
1398 	 * Take a ref for each segment urb so the xfer cannot disappear until
1399 	 * all of the callbacks run.
1400 	 */
1401 	wa_xfer_get(xfer);
1402 	/* submit the transfer request. */
1403 	seg->status = WA_SEG_SUBMITTED;
1404 	result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
1405 	if (result < 0) {
1406 		pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
1407 		       __func__, xfer, seg->index, result);
1408 		wa_xfer_put(xfer);
1409 		goto error_tr_submit;
1410 	}
1411 	/* submit the isoc packet descriptor if present. */
1412 	if (seg->isoc_pack_desc_urb) {
1413 		wa_xfer_get(xfer);
1414 		result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
1415 		seg->isoc_frame_index = 0;
1416 		if (result < 0) {
1417 			pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
1418 			       __func__, xfer, seg->index, result);
1419 			wa_xfer_put(xfer);
1420 			goto error_iso_pack_desc_submit;
1421 		}
1422 	}
1423 	/* submit the out data if this is an out request. */
1424 	if (seg->dto_urb) {
1425 		struct wahc *wa = xfer->wa;
1426 		wa_xfer_get(xfer);
1427 		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
1428 		if (result < 0) {
1429 			pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
1430 			       __func__, xfer, seg->index, result);
1431 			wa_xfer_put(xfer);
1432 			goto error_dto_submit;
1433 		}
1434 		/*
1435 		 * If this segment contains more than one isoc frame, hold
1436 		 * onto the dto resource until we send all frames.
1437 		 * Only applies to non-Alereon devices.
1438 		 */
1439 		if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
1440 			&& (seg->isoc_frame_count > 1))
1441 			*dto_done = 0;
1442 	}
1443 	rpipe_avail_dec(rpipe);
1444 	return 0;
1445 
1446 error_dto_submit:
1447 	usb_unlink_urb(seg->isoc_pack_desc_urb);
1448 error_iso_pack_desc_submit:
1449 	usb_unlink_urb(&seg->tr_urb);
1450 error_tr_submit:
1451 	seg->status = WA_SEG_ERROR;
1452 	seg->result = result;
1453 	*dto_done = 1;
1454 	return result;
1455 }
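/*
 * Reference counting note for the above: each successfully submitted
 * URB (transfer request, isoc packet descriptor, DTO) holds its own
 * reference on the xfer, dropped at the end of the corresponding
 * completion callback, so the xfer cannot be destroyed while any of
 * those URBs is still in flight.
 */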
1456 
1457 /*
1458  * Execute more queued request segments until the maximum concurrent allowed.
1459  * Return true if the DTO resource was acquired and released.
1460  *
1461  * The ugly unlock/lock sequence on the error path is needed as the
1462  * xfer->lock normally nests the seg_lock and not vice versa.
1463  */
1464 static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
1465 {
1466 	int result, dto_acquired = 0, dto_done = 0;
1467 	struct device *dev = &rpipe->wa->usb_iface->dev;
1468 	struct wa_seg *seg;
1469 	struct wa_xfer *xfer;
1470 	unsigned long flags;
1471 
1472 	*dto_waiting = 0;
1473 
1474 	spin_lock_irqsave(&rpipe->seg_lock, flags);
1475 	while (atomic_read(&rpipe->segs_available) > 0
1476 	      && !list_empty(&rpipe->seg_list)
1477 	      && (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
1478 		seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
1479 				 list_node);
1480 		list_del(&seg->list_node);
1481 		xfer = seg->xfer;
1482 		/*
1483 		 * Get a reference to the xfer in case the callbacks for the
1484 		 * URBs submitted by __wa_seg_submit attempt to complete
1485 		 * the xfer before this function completes.
1486 		 */
1487 		wa_xfer_get(xfer);
1488 		result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
1489 		/* release the dto resource if this RPIPE is done with it. */
1490 		if (dto_done)
1491 			__wa_dto_put(rpipe->wa);
1492 		dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
1493 			xfer, wa_xfer_id(xfer), seg->index,
1494 			atomic_read(&rpipe->segs_available), result);
1495 		if (unlikely(result < 0)) {
1496 			int done;
1497 
1498 			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1499 			spin_lock_irqsave(&xfer->lock, flags);
1500 			__wa_xfer_abort(xfer);
1501 			/*
1502 			 * This seg was marked as submitted when it was put on
1503 			 * the RPIPE seg_list.  Mark it done.
1504 			 */
1505 			xfer->segs_done++;
1506 			done = __wa_xfer_is_done(xfer);
1507 			spin_unlock_irqrestore(&xfer->lock, flags);
1508 			if (done)
1509 				wa_xfer_completion(xfer);
1510 			spin_lock_irqsave(&rpipe->seg_lock, flags);
1511 		}
1512 		wa_xfer_put(xfer);
1513 	}
1514 	/*
1515 	 * Mark this RPIPE as waiting if dto was not acquired, there are
1516 	 * delayed segs and no active transfers to wake us up later.
1517 	 */
1518 	if (!dto_acquired && !list_empty(&rpipe->seg_list)
1519 		&& (atomic_read(&rpipe->segs_available) ==
1520 			le16_to_cpu(rpipe->descr.wRequests)))
1521 		*dto_waiting = 1;
1522 
1523 	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1524 
1525 	return dto_done;
1526 }
1527 
1528 static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
1529 {
1530 	int dto_waiting;
1531 	int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);
1532 
1533 	/*
1534 	 * If this RPIPE is waiting on the DTO resource, add it to the tail of
1535 	 * the waiting list.
1536 	 * Otherwise, if the WA DTO resource was acquired and released by
1537 	 *  __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
1538 	 * DTO and failed during that time.  Check the delayed list and
1539 	 * process any waiters.
1540 	 */
1541 	if (dto_waiting)
1542 		wa_add_delayed_rpipe(rpipe->wa, rpipe);
1543 	else if (dto_done)
1544 		wa_check_for_delayed_rpipes(rpipe->wa);
1545 }
1546 
1547 /*
1548  * Submit the transfer's segments, delaying those that don't fit on the rpipe.
1549  * xfer->lock is taken
1550  *
1551  * On submission failure we just stop submitting and return an error;
1552  * wa_urb_enqueue_b() will execute the completion path.
1553  */
1554 static int __wa_xfer_submit(struct wa_xfer *xfer)
1555 {
1556 	int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
1557 	struct wahc *wa = xfer->wa;
1558 	struct device *dev = &wa->usb_iface->dev;
1559 	unsigned cnt;
1560 	struct wa_seg *seg;
1561 	unsigned long flags;
1562 	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
1563 	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
1564 	u8 available;
1565 	u8 empty;
1566 
1567 	spin_lock_irqsave(&wa->xfer_list_lock, flags);
1568 	list_add_tail(&xfer->list_node, &wa->xfer_list);
1569 	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1570 
1571 	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
1572 	result = 0;
1573 	spin_lock_irqsave(&rpipe->seg_lock, flags);
1574 	for (cnt = 0; cnt < xfer->segs; cnt++) {
1575 		int delay_seg = 1;
1576 
1577 		available = atomic_read(&rpipe->segs_available);
1578 		empty = list_empty(&rpipe->seg_list);
1579 		seg = xfer->seg[cnt];
1580 		if (available && empty) {
1581 			/*
1582 			 * Only attempt to acquire DTO if we have a segment
1583 			 * to send.
1584 			 */
1585 			dto_acquired = __wa_dto_try_get(rpipe->wa);
1586 			if (dto_acquired) {
1587 				delay_seg = 0;
1588 				result = __wa_seg_submit(rpipe, xfer, seg,
1589 							&dto_done);
1590 				dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
1591 					xfer, wa_xfer_id(xfer), cnt, available,
1592 					empty);
1593 				if (dto_done)
1594 					__wa_dto_put(rpipe->wa);
1595 
1596 				if (result < 0) {
1597 					__wa_xfer_abort(xfer);
1598 					goto error_seg_submit;
1599 				}
1600 			}
1601 		}
1602 
1603 		if (delay_seg) {
1604 			dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
1605 				xfer, wa_xfer_id(xfer), cnt, available,  empty);
1606 			seg->status = WA_SEG_DELAYED;
1607 			list_add_tail(&seg->list_node, &rpipe->seg_list);
1608 		}
1609 		xfer->segs_submitted++;
1610 	}
1611 error_seg_submit:
1612 	/*
1613 	 * Mark this RPIPE as waiting if dto was not acquired, there are
1614 	 * delayed segs and no active transfers to wake us up later.
1615 	 */
1616 	if (!dto_acquired && !list_empty(&rpipe->seg_list)
1617 		&& (atomic_read(&rpipe->segs_available) ==
1618 			le16_to_cpu(rpipe->descr.wRequests)))
1619 		dto_waiting = 1;
1620 	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1621 
1622 	if (dto_waiting)
1623 		wa_add_delayed_rpipe(rpipe->wa, rpipe);
1624 	else if (dto_done)
1625 		wa_check_for_delayed_rpipes(rpipe->wa);
1626 
1627 	return result;
1628 }
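
/*
 * Recap of the DTO gating used in __wa_xfer_submit() above (illustrative
 * only, no new code): the single DTO resource is claimed only once a
 * segment is actually ready to go out, and it is released as soon as
 * __wa_seg_submit() reports that the last URB of this burst has been
 * handed over:
 *
 *	dto_acquired = __wa_dto_try_get(rpipe->wa);
 *	if (dto_acquired) {
 *		result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
 *		if (dto_done)
 *			__wa_dto_put(rpipe->wa);
 *	}
 *
 * If DTO could not be grabbed and the RPIPE is completely idle, the RPIPE
 * parks itself on the delayed list [wa_add_delayed_rpipe()] and is woken
 * up later by wa_check_for_delayed_rpipes().
 */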
1629 
1630 /*
1631  * Second part of a URB/transfer enqueue operation
1632  *
1633  * Assumes this comes from wa_urb_enqueue() [maybe through
1634  * wa_urb_enqueue_run()]. At this point:
1635  *
1636  * xfer->wa	filled and refcounted
1637  * xfer->ep	filled with rpipe refcounted if
1638  *              delayed == 0
1639  * xfer->urb 	filled and refcounted (this is the case when called
1640  *              from wa_urb_enqueue() as we come from usb_submit_urb()
1641  *              and when called by wa_urb_enqueue_run(), as we took an
1642  *              extra ref dropped by _run() after we return).
1643  * xfer->gfp	filled
1644  *
1645  * If we fail at __wa_xfer_submit(), then we just check if we are done
1646  * and if so, we run the completion procedure. However, if we are not
1647  * yet done, we do nothing and wait for the completion handlers from
1648  * the submitted URBs or from the xfer-result path to kick in. If the
1649  * xfer result never arrives, the xfer will time out in the USB code and
1650  * dequeue() will be called.
1651  */
1652 static int wa_urb_enqueue_b(struct wa_xfer *xfer)
1653 {
1654 	int result;
1655 	unsigned long flags;
1656 	struct urb *urb = xfer->urb;
1657 	struct wahc *wa = xfer->wa;
1658 	struct wusbhc *wusbhc = wa->wusb;
1659 	struct wusb_dev *wusb_dev;
1660 	unsigned done;
1661 
1662 	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
1663 	if (result < 0) {
1664 		pr_err("%s: error_rpipe_get\n", __func__);
1665 		goto error_rpipe_get;
1666 	}
1667 	result = -ENODEV;
1668 	/* FIXME: segmentation broken -- kills DWA */
1669 	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */
1670 	if (urb->dev == NULL) {
1671 		mutex_unlock(&wusbhc->mutex);
1672 		pr_err("%s: error usb dev gone\n", __func__);
1673 		goto error_dev_gone;
1674 	}
1675 	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
1676 	if (wusb_dev == NULL) {
1677 		mutex_unlock(&wusbhc->mutex);
1678 		dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n",
1679 			__func__);
1680 		goto error_dev_gone;
1681 	}
1682 	mutex_unlock(&wusbhc->mutex);
1683 
1684 	spin_lock_irqsave(&xfer->lock, flags);
1685 	xfer->wusb_dev = wusb_dev;
1686 	result = urb->status;
1687 	if (urb->status != -EINPROGRESS) {
1688 		dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);
1689 		goto error_dequeued;
1690 	}
1691 
1692 	result = __wa_xfer_setup(xfer, urb);
1693 	if (result < 0) {
1694 		dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
1695 		goto error_xfer_setup;
1696 	}
1697 	/*
1698 	 * Get a xfer reference since __wa_xfer_submit starts asynchronous
1699 	 * operations that may try to complete the xfer before this function
1700 	 * exits.
1701 	 */
1702 	wa_xfer_get(xfer);
1703 	result = __wa_xfer_submit(xfer);
1704 	if (result < 0) {
1705 		dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
1706 		goto error_xfer_submit;
1707 	}
1708 	spin_unlock_irqrestore(&xfer->lock, flags);
1709 	wa_xfer_put(xfer);
1710 	return 0;
1711 
1712 	/*
1713 	 * This is basically wa_xfer_completion() broken up: wa_xfer_giveback()
1714 	 * does a wa_xfer_put() that will call wa_xfer_destroy() and undo
1715 	 * __wa_xfer_setup().
1716 	 */
1717 error_xfer_setup:
1718 error_dequeued:
1719 	spin_unlock_irqrestore(&xfer->lock, flags);
1720 	/* FIXME: segmentation broken, kills DWA */
1721 	if (wusb_dev)
1722 		wusb_dev_put(wusb_dev);
1723 error_dev_gone:
1724 	rpipe_put(xfer->ep->hcpriv);
1725 error_rpipe_get:
1726 	xfer->result = result;
1727 	return result;
1728 
1729 error_xfer_submit:
1730 	done = __wa_xfer_is_done(xfer);
1731 	xfer->result = result;
1732 	spin_unlock_irqrestore(&xfer->lock, flags);
1733 	if (done)
1734 		wa_xfer_completion(xfer);
1735 	wa_xfer_put(xfer);
1736 	/* return success since the completion routine will run. */
1737 	return 0;
1738 }
1739 
1740 /*
1741  * Execute the delayed transfers in the Wire Adapter @wa
1742  *
1743  * We need to be careful here, as dequeue() could be called in the
1744  * middle.  That's why we do the whole thing under the
1745  * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
1746  * and then checks the list -- since we would be acquiring the locks in
1747  * inverse order, we move the delayed list to a separate list while locked
1748  * and then submit the transfers without the list lock held.
1749  */
1750 void wa_urb_enqueue_run(struct work_struct *ws)
1751 {
1752 	struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
1753 	struct wa_xfer *xfer, *next;
1754 	struct urb *urb;
1755 	LIST_HEAD(tmp_list);
1756 
1757 	/* Create a copy of the wa->xfer_delayed_list while holding the lock */
1758 	spin_lock_irq(&wa->xfer_list_lock);
1759 	list_cut_position(&tmp_list, &wa->xfer_delayed_list,
1760 			wa->xfer_delayed_list.prev);
1761 	spin_unlock_irq(&wa->xfer_list_lock);
1762 
1763 	/*
1764 	 * enqueue from temp list without list lock held since wa_urb_enqueue_b
1765 	 * can take xfer->lock as well as lock mutexes.
1766 	 */
1767 	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1768 		list_del_init(&xfer->list_node);
1769 
1770 		urb = xfer->urb;
1771 		if (wa_urb_enqueue_b(xfer) < 0)
1772 			wa_xfer_giveback(xfer);
1773 		usb_put_urb(urb);	/* taken when queuing */
1774 	}
1775 }
1776 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
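
/*
 * Illustrative note (the actual init code lives outside this file):
 * wa_urb_enqueue_run() is the work callback paired with
 * wa->xfer_enqueue_work, which wa_urb_enqueue() queues on the wusbd
 * workqueue when it cannot sleep.  The pairing is presumably set up at
 * init time along the lines of:
 *
 *	INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
 *	INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
 */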
1777 
1778 /*
1779  * Process the errored transfers on the Wire Adapter outside of interrupt.
1780  */
1781 void wa_process_errored_transfers_run(struct work_struct *ws)
1782 {
1783 	struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
1784 	struct wa_xfer *xfer, *next;
1785 	LIST_HEAD(tmp_list);
1786 
1787 	pr_info("%s: Run delayed STALL processing.\n", __func__);
1788 
1789 	/* Create a copy of the wa->xfer_errored_list while holding the lock */
1790 	spin_lock_irq(&wa->xfer_list_lock);
1791 	list_cut_position(&tmp_list, &wa->xfer_errored_list,
1792 			wa->xfer_errored_list.prev);
1793 	spin_unlock_irq(&wa->xfer_list_lock);
1794 
1795 	/*
1796 	 * run rpipe_clear_feature_stalled from temp list without list lock
1797 	 * held.
1798 	 */
1799 	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1800 		struct usb_host_endpoint *ep;
1801 		unsigned long flags;
1802 		struct wa_rpipe *rpipe;
1803 
1804 		spin_lock_irqsave(&xfer->lock, flags);
1805 		ep = xfer->ep;
1806 		rpipe = ep->hcpriv;
1807 		spin_unlock_irqrestore(&xfer->lock, flags);
1808 
1809 		/* clear RPIPE feature stalled without holding a lock. */
1810 		rpipe_clear_feature_stalled(wa, ep);
1811 
1812 		/* complete the xfer. This removes it from the tmp list. */
1813 		wa_xfer_completion(xfer);
1814 
1815 		/* check for work. */
1816 		wa_xfer_delayed_run(rpipe);
1817 	}
1818 }
1819 EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
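
/*
 * Illustrative recap: this work item is queued from wa_xfer_result_chew()
 * (further below) when a control endpoint reports WA_XFER_STATUS_HALTED
 * and the transfer is done, roughly:
 *
 *	list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
 *	queue_work(wusbd, &wa->xfer_error_work);
 *
 * so the RPIPE STALL is cleared in process context instead of in the DTI
 * completion path.
 */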
1820 
1821 /*
1822  * Submit a transfer to the Wire Adapter in a delayed way
1823  *
1824  * The process of enqueueing may sleep [see wa_urb_enqueue_b(), for the
1825  * rpipe_get_by_ep() and the mutex_lock()]. If we are in an atomic
1826  * context, we defer the wa_urb_enqueue_b() call; else we call it directly.
1827  *
1828  * @urb: We own a reference to it done by the HCI Linux USB stack that
1829  *       will be given up by calling usb_hcd_giveback_urb() or by
1830  *       returning error from this function -> ergo we don't have to
1831  *       refcount it.
1832  */
1833 int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1834 		   struct urb *urb, gfp_t gfp)
1835 {
1836 	int result;
1837 	struct device *dev = &wa->usb_iface->dev;
1838 	struct wa_xfer *xfer;
1839 	unsigned long my_flags;
1840 	unsigned cant_sleep = irqs_disabled() | in_atomic();
1841 
1842 	if ((urb->transfer_buffer == NULL)
1843 	    && (urb->sg == NULL)
1844 	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1845 	    && urb->transfer_buffer_length != 0) {
1846 		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1847 		dump_stack();
1848 	}
1849 
1850 	spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1851 	result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb);
1852 	spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1853 	if (result < 0)
1854 		goto error_link_urb;
1855 
1856 	result = -ENOMEM;
1857 	xfer = kzalloc(sizeof(*xfer), gfp);
1858 	if (xfer == NULL)
1859 		goto error_kmalloc;
1860 
1861 	result = -ENOENT;
1862 	if (urb->status != -EINPROGRESS)	/* cancelled */
1863 		goto error_dequeued;		/* before starting? */
1864 	wa_xfer_init(xfer);
1865 	xfer->wa = wa_get(wa);
1866 	xfer->urb = urb;
1867 	xfer->gfp = gfp;
1868 	xfer->ep = ep;
1869 	urb->hcpriv = xfer;
1870 
1871 	dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1872 		xfer, urb, urb->pipe, urb->transfer_buffer_length,
1873 		urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1874 		urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1875 		cant_sleep ? "deferred" : "inline");
1876 
1877 	if (cant_sleep) {
1878 		usb_get_urb(urb);
1879 		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1880 		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1881 		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1882 		queue_work(wusbd, &wa->xfer_enqueue_work);
1883 	} else {
1884 		result = wa_urb_enqueue_b(xfer);
1885 		if (result < 0) {
1886 			/*
1887 			 * URB submit/enqueue failed.  Clean up, return an
1888 			 * error and do not run the callback.  This avoids
1889 			 * an infinite submit/complete loop.
1890 			 */
1891 			dev_err(dev, "%s: URB enqueue failed: %d\n",
1892 			   __func__, result);
1893 			wa_put(xfer->wa);
1894 			wa_xfer_put(xfer);
1895 			spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1896 			usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
1897 			spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1898 			return result;
1899 		}
1900 	}
1901 	return 0;
1902 
1903 error_dequeued:
1904 	kfree(xfer);
1905 error_kmalloc:
1906 	spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1907 	usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
1908 	spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1909 error_link_urb:
1910 	return result;
1911 }
1912 EXPORT_SYMBOL_GPL(wa_urb_enqueue);
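
/*
 * Hypothetical caller sketch (the real glue lives in the HWA host
 * controller code, not in this file): an urb_enqueue HCD op is expected
 * to hand the URB straight to wa_urb_enqueue() with its endpoint and gfp
 * flags, along the lines of:
 *
 *	static int xxx_op_urb_enqueue(struct usb_hcd *usb_hcd,
 *				      struct urb *urb, gfp_t gfp)
 *	{
 *		struct wahc *wa = ...;		(from the hcd's private data)
 *
 *		return wa_urb_enqueue(wa, urb->ep, urb, gfp);
 *	}
 *
 * "xxx" and "..." are placeholders, not symbols defined by this driver.
 */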
1913 
1914 /*
1915  * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
1916  * handler] is called.
1917  *
1918  * Until a transfer goes successfully through wa_urb_enqueue(), it
1919  * needs to be dequeued with the completion called; while stuck in the
1920  * delayed list or before __wa_xfer_setup() is called, we do the completion.
1921  *
1922  *  not setup  If there is no hcpriv yet, that means that the enqueue
1923  *             has not had time to set the xfer up yet. Because
1924  *             urb->status should be other than -EINPROGRESS,
1925  *             enqueue() will catch that and bail out.
1926  *
1927  * If the transfer has gone through setup, we just need to clean it
1928  * up. If it has gone through submit(), we have to abort it [with an
1929  * asynch request] and then make sure we cancel each segment.
1930  *
1931  */
1932 int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
1933 {
1934 	unsigned long flags, flags2;
1935 	struct wa_xfer *xfer;
1936 	struct wa_seg *seg;
1937 	struct wa_rpipe *rpipe;
1938 	unsigned cnt, done = 0, xfer_abort_pending;
1939 	unsigned rpipe_ready = 0;
1940 	int result;
1941 
1942 	/* check if it is safe to unlink. */
1943 	spin_lock_irqsave(&wa->xfer_list_lock, flags);
1944 	result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
1945 	if ((result == 0) && urb->hcpriv) {
1946 		/*
1947 		 * Get a xfer ref to prevent a race with wa_xfer_giveback
1948 		 * cleaning up the xfer while we are working with it.
1949 		 */
1950 		wa_xfer_get(urb->hcpriv);
1951 	}
1952 	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1953 	if (result)
1954 		return result;
1955 
1956 	xfer = urb->hcpriv;
1957 	if (xfer == NULL)
1958 		return -ENOENT;
1959 	spin_lock_irqsave(&xfer->lock, flags);
1960 	pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
1961 	rpipe = xfer->ep->hcpriv;
1962 	if (rpipe == NULL) {
1963 		pr_debug("%s: xfer %p id 0x%08X has no RPIPE.  %s",
1964 			__func__, xfer, wa_xfer_id(xfer),
1965 			"Probably already aborted.\n" );
1966 		result = -ENOENT;
1967 		goto out_unlock;
1968 	}
1969 	/*
1970 	 * Check for done to avoid racing with wa_xfer_giveback and completing
1971 	 * twice.
1972 	 */
1973 	if (__wa_xfer_is_done(xfer)) {
1974 		pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
1975 			xfer, wa_xfer_id(xfer));
1976 		result = -ENOENT;
1977 		goto out_unlock;
1978 	}
1979 	/* Check the delayed list -> if there, release and complete */
1980 	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1981 	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1982 		goto dequeue_delayed;
1983 	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1984 	if (xfer->seg == NULL)  	/* still hasn't reached */
1985 		goto out_unlock;	/* setup(), enqueue_b() completes */
1986 	/* Ok, the xfer is in flight already, it's been setup and submitted.*/
1987 	xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
1988 	/*
1989 	 * grab the rpipe->seg_lock here to prevent racing with
1990 	 * __wa_xfer_delayed_run.
1991 	 */
1992 	spin_lock(&rpipe->seg_lock);
1993 	for (cnt = 0; cnt < xfer->segs; cnt++) {
1994 		seg = xfer->seg[cnt];
1995 		pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
1996 			__func__, wa_xfer_id(xfer), cnt, seg->status);
1997 		switch (seg->status) {
1998 		case WA_SEG_NOTREADY:
1999 		case WA_SEG_READY:
2000 			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
2001 			       xfer, cnt, seg->status);
2002 			WARN_ON(1);
2003 			break;
2004 		case WA_SEG_DELAYED:
2005 			/*
2006 			 * delete from rpipe delayed list.  If no segments on
2007 			 * this xfer have been submitted, __wa_xfer_is_done will
2008 			 * trigger a giveback below.  Otherwise, the submitted
2009 			 * segments will be completed in the DTI interrupt.
2010 			 */
2011 			seg->status = WA_SEG_ABORTED;
2012 			seg->result = -ENOENT;
2013 			list_del(&seg->list_node);
2014 			xfer->segs_done++;
2015 			break;
2016 		case WA_SEG_DONE:
2017 		case WA_SEG_ERROR:
2018 		case WA_SEG_ABORTED:
2019 			break;
2020 			/*
2021 			 * The buf_in data for a segment in the
2022 			 * WA_SEG_DTI_PENDING state is actively being read.
2023 			 * Let wa_buf_in_cb handle it since it will be called
2024 			 * and will increment xfer->segs_done.  Cleaning up
2025 			 * here could cause wa_buf_in_cb to access the xfer
2026 			 * after it has been completed/freed.
2027 			 */
2028 		case WA_SEG_DTI_PENDING:
2029 			break;
2030 			/*
2031 			 * In the states below, the HWA device already knows
2032 			 * about the transfer.  If an abort request was sent,
2033 			 * allow the HWA to process it and wait for the
2034 			 * results.  Otherwise, the DTI state and seg completed
2035 			 * counts can get out of sync.
2036 			 */
2037 		case WA_SEG_SUBMITTED:
2038 		case WA_SEG_PENDING:
2039 			/*
2040 			 * Check if the abort was successfully sent.  This could
2041 			 * be false if the HWA has been removed but we haven't
2042 			 * gotten the disconnect notification yet.
2043 			 */
2044 			if (!xfer_abort_pending) {
2045 				seg->status = WA_SEG_ABORTED;
2046 				rpipe_ready = rpipe_avail_inc(rpipe);
2047 				xfer->segs_done++;
2048 			}
2049 			break;
2050 		}
2051 	}
2052 	spin_unlock(&rpipe->seg_lock);
2053 	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
2054 	done = __wa_xfer_is_done(xfer);
2055 	spin_unlock_irqrestore(&xfer->lock, flags);
2056 	if (done)
2057 		wa_xfer_completion(xfer);
2058 	if (rpipe_ready)
2059 		wa_xfer_delayed_run(rpipe);
2060 	wa_xfer_put(xfer);
2061 	return result;
2062 
2063 out_unlock:
2064 	spin_unlock_irqrestore(&xfer->lock, flags);
2065 	wa_xfer_put(xfer);
2066 	return result;
2067 
2068 dequeue_delayed:
2069 	list_del_init(&xfer->list_node);
2070 	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
2071 	xfer->result = urb->status;
2072 	spin_unlock_irqrestore(&xfer->lock, flags);
2073 	wa_xfer_giveback(xfer);
2074 	wa_xfer_put(xfer);
2075 	usb_put_urb(urb);		/* we got a ref in enqueue() */
2076 	return 0;
2077 }
2078 EXPORT_SYMBOL_GPL(wa_urb_dequeue);
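
/*
 * Hypothetical caller sketch, mirroring the enqueue side above: the HCD's
 * urb_dequeue op simply forwards the URB and the status handed to it by
 * the USB core,
 *
 *	return wa_urb_dequeue(wa, urb, status);
 *
 * and this function takes care of aborting/completing whatever segments
 * are still in flight.
 */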
2079 
2080 /*
2081  * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
2082  * codes
2083  *
2084  * Positive errno values are internal inconsistencies and should be
2085  * flagged louder. Negative values are to be passed up to the user in
2086  * the normal way.
2087  *
2088  * @status: USB WA status code -- high two bits are stripped.
2089  */
2090 static int wa_xfer_status_to_errno(u8 status)
2091 {
2092 	int errno;
2093 	u8 real_status = status;
2094 	static int xlat[] = {
2095 		[WA_XFER_STATUS_SUCCESS] = 		0,
2096 		[WA_XFER_STATUS_HALTED] = 		-EPIPE,
2097 		[WA_XFER_STATUS_DATA_BUFFER_ERROR] = 	-ENOBUFS,
2098 		[WA_XFER_STATUS_BABBLE] = 		-EOVERFLOW,
2099 		[WA_XFER_RESERVED] = 			EINVAL,
2100 		[WA_XFER_STATUS_NOT_FOUND] =		0,
2101 		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
2102 		[WA_XFER_STATUS_TRANSACTION_ERROR] = 	-EILSEQ,
2103 		[WA_XFER_STATUS_ABORTED] =		-ENOENT,
2104 		[WA_XFER_STATUS_RPIPE_NOT_READY] = 	EINVAL,
2105 		[WA_XFER_INVALID_FORMAT] = 		EINVAL,
2106 		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = 	EINVAL,
2107 		[WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = 	EINVAL,
2108 	};
2109 	status &= 0x3f;
2110 
2111 	if (status == 0)
2112 		return 0;
2113 	if (status >= ARRAY_SIZE(xlat)) {
2114 		printk_ratelimited(KERN_ERR "%s(): BUG? "
2115 			       "Unknown WA transfer status 0x%02x\n",
2116 			       __func__, real_status);
2117 		return -EINVAL;
2118 	}
2119 	errno = xlat[status];
2120 	if (unlikely(errno > 0)) {
2121 		printk_ratelimited(KERN_ERR "%s(): BUG? "
2122 			       "Inconsistent WA status: 0x%02x\n",
2123 			       __func__, real_status);
2124 		errno = -errno;
2125 	}
2126 	return errno;
2127 }
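
/*
 * Worked example of the translation above: a stripped status of
 * WA_XFER_STATUS_HALTED maps to -EPIPE, which the DTI path stores as the
 * segment result,
 *
 *	seg->result = wa_xfer_status_to_errno(usb_status);
 *
 * while a reserved or out-of-range code is logged as a device
 * inconsistency and forced to -EINVAL before being passed up.
 */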
2128 
2129 /*
2130  * If a last segment flag and/or a transfer result error is encountered,
2131  * no other segment transfer results will be returned from the device.
2132  * Mark the remaining submitted or pending segments as completed so that
2133  * the xfer will complete cleanly.
2134  *
2135  * xfer->lock must be held
2136  *
2137  */
2138 static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
2139 		int starting_index, enum wa_seg_status status)
2140 {
2141 	int index;
2142 	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
2143 
2144 	for (index = starting_index; index < xfer->segs_submitted; index++) {
2145 		struct wa_seg *current_seg = xfer->seg[index];
2146 
2147 		BUG_ON(current_seg == NULL);
2148 
2149 		switch (current_seg->status) {
2150 		case WA_SEG_SUBMITTED:
2151 		case WA_SEG_PENDING:
2152 		case WA_SEG_DTI_PENDING:
2153 			rpipe_avail_inc(rpipe);
2154 		/*
2155 		 * do not increment RPIPE avail for the WA_SEG_DELAYED case
2156 		 * since it has not been submitted to the RPIPE.
2157 		 */
2158 		case WA_SEG_DELAYED:
2159 			xfer->segs_done++;
2160 			current_seg->status = status;
2161 			break;
2162 		case WA_SEG_ABORTED:
2163 			break;
2164 		default:
2165 			WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
2166 				__func__, wa_xfer_id(xfer), index,
2167 				current_seg->status);
2168 			break;
2169 		}
2170 	}
2171 }
2172 
2173 /* Populate the given urb based on the current isoc transfer state. */
2174 static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
2175 	struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
2176 {
2177 	int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
2178 	int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
2179 	struct usb_iso_packet_descriptor *iso_frame_desc =
2180 						xfer->urb->iso_frame_desc;
2181 	const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
2182 	int next_frame_contiguous;
2183 	struct usb_iso_packet_descriptor *iso_frame;
2184 
2185 	BUG_ON(buf_in_urb->status == -EINPROGRESS);
2186 
2187 	/*
2188 	 * If the current frame actual_length is contiguous with the next frame
2189 	 * and actual_length is a multiple of the DTI endpoint max packet size,
2190 	 * combine the current frame with the next frame in a single URB.  This
2191 	 * reduces the number of URBs that must be submitted in that case.
2192 	 */
2193 	seg_index = seg->isoc_frame_index;
2194 	do {
2195 		next_frame_contiguous = 0;
2196 
2197 		iso_frame = &iso_frame_desc[urb_frame_index];
2198 		total_len += iso_frame->actual_length;
2199 		++urb_frame_index;
2200 		++seg_index;
2201 
2202 		if (seg_index < seg->isoc_frame_count) {
2203 			struct usb_iso_packet_descriptor *next_iso_frame;
2204 
2205 			next_iso_frame = &iso_frame_desc[urb_frame_index];
2206 
2207 			if ((iso_frame->offset + iso_frame->actual_length) ==
2208 				next_iso_frame->offset)
2209 				next_frame_contiguous = 1;
2210 		}
2211 	} while (next_frame_contiguous
2212 			&& ((iso_frame->actual_length % dti_packet_size) == 0));
2213 
2214 	/* this should always be 0 before a resubmit. */
2215 	buf_in_urb->num_mapped_sgs	= 0;
2216 	buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
2217 		iso_frame_desc[urb_start_frame].offset;
2218 	buf_in_urb->transfer_buffer_length = total_len;
2219 	buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2220 	buf_in_urb->transfer_buffer = NULL;
2221 	buf_in_urb->sg = NULL;
2222 	buf_in_urb->num_sgs = 0;
2223 	buf_in_urb->context = seg;
2224 
2225 	/* return the number of frames included in this URB. */
2226 	return seg_index - seg->isoc_frame_index;
2227 }
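
/*
 * Worked example for the frame-combining loop above (illustrative numbers,
 * assuming a DTI maxpacket of 512 bytes): if iso frame N has offset 0 and
 * actual_length 512, and frame N+1 has offset 512 and actual_length 100,
 * the frames are contiguous and frame N is a whole multiple of the DTI
 * packet size, so both are covered by one buf_in URB:
 *
 *	buf_in_urb->transfer_buffer_length == 612
 *	return value == 2	(frames consumed by this URB)
 *
 * Frame N+1 ends the run because 100 is not a multiple of 512.
 */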
2228 
2229 /* Populate the given urb based on the current transfer state. */
2230 static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
2231 	unsigned int seg_idx, unsigned int bytes_transferred)
2232 {
2233 	int result = 0;
2234 	struct wa_seg *seg = xfer->seg[seg_idx];
2235 
2236 	BUG_ON(buf_in_urb->status == -EINPROGRESS);
2237 	/* this should always be 0 before a resubmit. */
2238 	buf_in_urb->num_mapped_sgs	= 0;
2239 
2240 	if (xfer->is_dma) {
2241 		buf_in_urb->transfer_dma = xfer->urb->transfer_dma
2242 			+ (seg_idx * xfer->seg_size);
2243 		buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2244 		buf_in_urb->transfer_buffer = NULL;
2245 		buf_in_urb->sg = NULL;
2246 		buf_in_urb->num_sgs = 0;
2247 	} else {
2248 		/* do buffer or SG processing. */
2249 		buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
2250 
2251 		if (xfer->urb->transfer_buffer) {
2252 			buf_in_urb->transfer_buffer =
2253 				xfer->urb->transfer_buffer
2254 				+ (seg_idx * xfer->seg_size);
2255 			buf_in_urb->sg = NULL;
2256 			buf_in_urb->num_sgs = 0;
2257 		} else {
2258 			/* allocate an SG list to store seg_size bytes
2259 				and copy the subset of the xfer->urb->sg
2260 				that matches the buffer subset we are
2261 				about to read. */
2262 			buf_in_urb->sg = wa_xfer_create_subset_sg(
2263 				xfer->urb->sg,
2264 				seg_idx * xfer->seg_size,
2265 				bytes_transferred,
2266 				&(buf_in_urb->num_sgs));
2267 
2268 			if (!(buf_in_urb->sg)) {
2269 				buf_in_urb->num_sgs	= 0;
2270 				result = -ENOMEM;
2271 			}
2272 			buf_in_urb->transfer_buffer = NULL;
2273 		}
2274 	}
2275 	buf_in_urb->transfer_buffer_length = bytes_transferred;
2276 	buf_in_urb->context = seg;
2277 
2278 	return result;
2279 }
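
/*
 * Worked example for the scatterlist branch above (illustrative numbers):
 * for seg_idx == 2 with xfer->seg_size == 4096 and bytes_transferred ==
 * 1024, the call becomes
 *
 *	buf_in_urb->sg = wa_xfer_create_subset_sg(xfer->urb->sg,
 *		2 * 4096, 1024, &(buf_in_urb->num_sgs));
 *
 * i.e. an sg list covering bytes 8192..9215 of the caller's buffer, which
 * is exactly the region this segment's IN data belongs to.
 */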
2280 
2281 /*
2282  * Process a xfer result completion message
2283  *
2284  * inbound transfers: need to schedule a buf_in_urb read
2285  *
2286  * FIXME: this function needs to be broken up in parts
2287  */
2288 static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
2289 		struct wa_xfer_result *xfer_result)
2290 {
2291 	int result;
2292 	struct device *dev = &wa->usb_iface->dev;
2293 	unsigned long flags;
2294 	unsigned int seg_idx;
2295 	struct wa_seg *seg;
2296 	struct wa_rpipe *rpipe;
2297 	unsigned done = 0;
2298 	u8 usb_status;
2299 	unsigned rpipe_ready = 0;
2300 	unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
2301 	struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);
2302 
2303 	spin_lock_irqsave(&xfer->lock, flags);
2304 	seg_idx = xfer_result->bTransferSegment & 0x7f;
2305 	if (unlikely(seg_idx >= xfer->segs))
2306 		goto error_bad_seg;
2307 	seg = xfer->seg[seg_idx];
2308 	rpipe = xfer->ep->hcpriv;
2309 	usb_status = xfer_result->bTransferStatus;
2310 	dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
2311 		xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
2312 	if (seg->status == WA_SEG_ABORTED
2313 	    || seg->status == WA_SEG_ERROR)	/* already handled */
2314 		goto segment_aborted;
2315 	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here */
2316 		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
2317 	if (seg->status != WA_SEG_PENDING) {
2318 		if (printk_ratelimit())
2319 			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
2320 				xfer, seg_idx, seg->status);
2321 		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
2322 	}
2323 	if (usb_status & 0x80) {
2324 		seg->result = wa_xfer_status_to_errno(usb_status);
2325 		dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
2326 			xfer, xfer->id, seg->index, usb_status);
2327 		seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
2328 			WA_SEG_ABORTED : WA_SEG_ERROR;
2329 		goto error_complete;
2330 	}
2331 	/* FIXME: we ignore warnings, tally them for stats */
2332 	if (usb_status & 0x40) 		/* Warning?... */
2333 		usb_status = 0;		/* ... pass */
2334 	/*
2335 	 * If the last segment bit is set, complete the remaining segments.
2336 	 * When the current segment is completed, either in wa_buf_in_cb for
2337 	 * transfers with data or below for no data, the xfer will complete.
2338 	 */
2339 	if (xfer_result->bTransferSegment & 0x80)
2340 		wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
2341 			WA_SEG_DONE);
2342 	if (usb_pipeisoc(xfer->urb->pipe)
2343 		&& (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
2344 		/* set up WA state to read the isoc packet status next. */
2345 		wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
2346 		wa->dti_isoc_xfer_seg = seg_idx;
2347 		wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
2348 	} else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
2349 			&& (bytes_transferred > 0)) {
2350 		/* IN data phase: read to buffer */
2351 		seg->status = WA_SEG_DTI_PENDING;
2352 		result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
2353 			bytes_transferred);
2354 		if (result < 0)
2355 			goto error_buf_in_populate;
2356 		++(wa->active_buf_in_urbs);
2357 		result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
2358 		if (result < 0) {
2359 			--(wa->active_buf_in_urbs);
2360 			goto error_submit_buf_in;
2361 		}
2362 	} else {
2363 		/* OUT data phase or no data, complete it -- */
2364 		seg->result = bytes_transferred;
2365 		rpipe_ready = rpipe_avail_inc(rpipe);
2366 		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
2367 	}
2368 	spin_unlock_irqrestore(&xfer->lock, flags);
2369 	if (done)
2370 		wa_xfer_completion(xfer);
2371 	if (rpipe_ready)
2372 		wa_xfer_delayed_run(rpipe);
2373 	return;
2374 
2375 error_submit_buf_in:
2376 	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2377 		dev_err(dev, "DTI: URB max acceptable errors "
2378 			"exceeded, resetting device\n");
2379 		wa_reset_all(wa);
2380 	}
2381 	if (printk_ratelimit())
2382 		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
2383 			xfer, seg_idx, result);
2384 	seg->result = result;
2385 	kfree(buf_in_urb->sg);
2386 	buf_in_urb->sg = NULL;
2387 error_buf_in_populate:
2388 	__wa_xfer_abort(xfer);
2389 	seg->status = WA_SEG_ERROR;
2390 error_complete:
2391 	xfer->segs_done++;
2392 	rpipe_ready = rpipe_avail_inc(rpipe);
2393 	wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
2394 	done = __wa_xfer_is_done(xfer);
2395 	/*
2396 	 * queue work item to clear STALL for control endpoints.
2397 	 * Otherwise, let endpoint_reset take care of it.
2398 	 */
2399 	if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
2400 		usb_endpoint_xfer_control(&xfer->ep->desc) &&
2401 		done) {
2402 
2403 		dev_info(dev, "Control EP stall.  Queue delayed work.\n");
2404 		spin_lock(&wa->xfer_list_lock);
2405 		/* move xfer from xfer_list to xfer_errored_list. */
2406 		list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
2407 		spin_unlock(&wa->xfer_list_lock);
2408 		spin_unlock_irqrestore(&xfer->lock, flags);
2409 		queue_work(wusbd, &wa->xfer_error_work);
2410 	} else {
2411 		spin_unlock_irqrestore(&xfer->lock, flags);
2412 		if (done)
2413 			wa_xfer_completion(xfer);
2414 		if (rpipe_ready)
2415 			wa_xfer_delayed_run(rpipe);
2416 	}
2417 
2418 	return;
2419 
2420 error_bad_seg:
2421 	spin_unlock_irqrestore(&xfer->lock, flags);
2422 	wa_urb_dequeue(wa, xfer->urb, -ENOENT);
2423 	if (printk_ratelimit())
2424 		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
2425 	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2426 		dev_err(dev, "DTI: URB max acceptable errors "
2427 			"exceeded, resetting device\n");
2428 		wa_reset_all(wa);
2429 	}
2430 	return;
2431 
2432 segment_aborted:
2433 	/* nothing to do, as the aborter did the completion */
2434 	spin_unlock_irqrestore(&xfer->lock, flags);
2435 }
2436 
2437 /*
2438  * Process an isochronous packet status message
2439  *
2440  * inbound transfers: need to schedule a buf_in_urb read
2441  */
2442 static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
2443 {
2444 	struct device *dev = &wa->usb_iface->dev;
2445 	struct wa_xfer_packet_status_hwaiso *packet_status;
2446 	struct wa_xfer_packet_status_len_hwaiso *status_array;
2447 	struct wa_xfer *xfer;
2448 	unsigned long flags;
2449 	struct wa_seg *seg;
2450 	struct wa_rpipe *rpipe;
2451 	unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
2452 	unsigned first_frame_index = 0, rpipe_ready = 0;
2453 	int expected_size;
2454 
2455 	/* We have a xfer result buffer; check it */
2456 	dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
2457 		urb->actual_length, urb->transfer_buffer);
2458 	packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
2459 	if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
2460 		dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
2461 			packet_status->bPacketType);
2462 		goto error_parse_buffer;
2463 	}
2464 	xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
2465 	if (xfer == NULL) {
2466 		dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
2467 			wa->dti_isoc_xfer_in_progress);
2468 		goto error_parse_buffer;
2469 	}
2470 	spin_lock_irqsave(&xfer->lock, flags);
2471 	if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
2472 		goto error_bad_seg;
2473 	seg = xfer->seg[wa->dti_isoc_xfer_seg];
2474 	rpipe = xfer->ep->hcpriv;
2475 	expected_size = sizeof(*packet_status) +
2476 			(sizeof(packet_status->PacketStatus[0]) *
2477 			seg->isoc_frame_count);
2478 	if (urb->actual_length != expected_size) {
2479 		dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n",
2480 			urb->actual_length, expected_size);
2481 		goto error_bad_seg;
2482 	}
2483 	if (le16_to_cpu(packet_status->wLength) != expected_size) {
2484 		dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
2485 			le16_to_cpu(packet_status->wLength));
2486 		goto error_bad_seg;
2487 	}
2488 	/* write isoc packet status and lengths back to the xfer urb. */
2489 	status_array = packet_status->PacketStatus;
2490 	xfer->urb->start_frame =
2491 		wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
2492 	for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
2493 		struct usb_iso_packet_descriptor *iso_frame_desc =
2494 			xfer->urb->iso_frame_desc;
2495 		const int xfer_frame_index =
2496 			seg->isoc_frame_offset + seg_index;
2497 
2498 		iso_frame_desc[xfer_frame_index].status =
2499 			wa_xfer_status_to_errno(
2500 			le16_to_cpu(status_array[seg_index].PacketStatus));
2501 		iso_frame_desc[xfer_frame_index].actual_length =
2502 			le16_to_cpu(status_array[seg_index].PacketLength);
2503 		/* track the number of frames successfully transferred. */
2504 		if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
2505 			/* save the starting frame index for buf_in_urb. */
2506 			if (!data_frame_count)
2507 				first_frame_index = seg_index;
2508 			++data_frame_count;
2509 		}
2510 	}
2511 
2512 	if (xfer->is_inbound && data_frame_count) {
2513 		int result, total_frames_read = 0, urb_index = 0;
2514 		struct urb *buf_in_urb;
2515 
2516 		/* IN data phase: read to buffer */
2517 		seg->status = WA_SEG_DTI_PENDING;
2518 
2519 		/* start with the first frame with data. */
2520 		seg->isoc_frame_index = first_frame_index;
2521 		/* submit up to WA_MAX_BUF_IN_URBS read URBs. */
2522 		do {
2523 			int urb_frame_index, urb_frame_count;
2524 			struct usb_iso_packet_descriptor *iso_frame_desc;
2525 
2526 			buf_in_urb = &(wa->buf_in_urbs[urb_index]);
2527 			urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
2528 				buf_in_urb, xfer, seg);
2529 			/* advance frame index to start of next read URB. */
2530 			seg->isoc_frame_index += urb_frame_count;
2531 			total_frames_read += urb_frame_count;
2532 
2533 			++(wa->active_buf_in_urbs);
2534 			result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
2535 
2536 			/* skip 0-byte frames. */
2537 			urb_frame_index =
2538 				seg->isoc_frame_offset + seg->isoc_frame_index;
2539 			iso_frame_desc =
2540 				&(xfer->urb->iso_frame_desc[urb_frame_index]);
2541 			while ((seg->isoc_frame_index <
2542 						seg->isoc_frame_count) &&
2543 				 (iso_frame_desc->actual_length == 0)) {
2544 				++(seg->isoc_frame_index);
2545 				++iso_frame_desc;
2546 			}
2547 			++urb_index;
2548 
2549 		} while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
2550 				&& (seg->isoc_frame_index <
2551 						seg->isoc_frame_count));
2552 
2553 		if (result < 0) {
2554 			--(wa->active_buf_in_urbs);
2555 			dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
2556 				result);
2557 			wa_reset_all(wa);
2558 		} else if (data_frame_count > total_frames_read)
2559 			/* If we need to read more frames, set DTI busy. */
2560 			dti_busy = 1;
2561 	} else {
2562 		/* OUT transfer or no more IN data, complete it -- */
2563 		rpipe_ready = rpipe_avail_inc(rpipe);
2564 		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
2565 	}
2566 	spin_unlock_irqrestore(&xfer->lock, flags);
2567 	if (dti_busy)
2568 		wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
2569 	else
2570 		wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
2571 	if (done)
2572 		wa_xfer_completion(xfer);
2573 	if (rpipe_ready)
2574 		wa_xfer_delayed_run(rpipe);
2575 	wa_xfer_put(xfer);
2576 	return dti_busy;
2577 
2578 error_bad_seg:
2579 	spin_unlock_irqrestore(&xfer->lock, flags);
2580 	wa_xfer_put(xfer);
2581 error_parse_buffer:
2582 	return dti_busy;
2583 }
2584 
2585 /*
2586  * Callback for the IN data phase
2587  *
2588  * If successful, transition state; otherwise, take note of the
2589  * error, mark this segment done and try completion.
2590  *
2591  * Note we don't access the transfer until we are sure that it hasn't
2592  * been cancelled (ECONNRESET, ENOENT), since cancellation could mean
2593  * that seg->xfer is already gone.
2594  */
2595 static void wa_buf_in_cb(struct urb *urb)
2596 {
2597 	struct wa_seg *seg = urb->context;
2598 	struct wa_xfer *xfer = seg->xfer;
2599 	struct wahc *wa;
2600 	struct device *dev;
2601 	struct wa_rpipe *rpipe;
2602 	unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
2603 	unsigned long flags;
2604 	int resubmit_dti = 0, active_buf_in_urbs;
2605 	u8 done = 0;
2606 
2607 	/* free the sg if it was used. */
2608 	kfree(urb->sg);
2609 	urb->sg = NULL;
2610 
2611 	spin_lock_irqsave(&xfer->lock, flags);
2612 	wa = xfer->wa;
2613 	dev = &wa->usb_iface->dev;
2614 	--(wa->active_buf_in_urbs);
2615 	active_buf_in_urbs = wa->active_buf_in_urbs;
2616 	rpipe = xfer->ep->hcpriv;
2617 
2618 	if (usb_pipeisoc(xfer->urb->pipe)) {
2619 		struct usb_iso_packet_descriptor *iso_frame_desc =
2620 			xfer->urb->iso_frame_desc;
2621 		int	seg_index;
2622 
2623 		/*
2624 		 * Find the next isoc frame with data and count how many
2625 		 * frames with data remain.
2626 		 */
2627 		seg_index = seg->isoc_frame_index;
2628 		while (seg_index < seg->isoc_frame_count) {
2629 			const int urb_frame_index =
2630 				seg->isoc_frame_offset + seg_index;
2631 
2632 			if (iso_frame_desc[urb_frame_index].actual_length > 0) {
2633 				/* save the index of the next frame with data */
2634 				if (!isoc_data_frame_count)
2635 					seg->isoc_frame_index = seg_index;
2636 				++isoc_data_frame_count;
2637 			}
2638 			++seg_index;
2639 		}
2640 	}
2641 	spin_unlock_irqrestore(&xfer->lock, flags);
2642 
2643 	switch (urb->status) {
2644 	case 0:
2645 		spin_lock_irqsave(&xfer->lock, flags);
2646 
2647 		seg->result += urb->actual_length;
2648 		if (isoc_data_frame_count > 0) {
2649 			int result, urb_frame_count;
2650 
2651 			/* submit a read URB for the next frame with data. */
2652 			urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
2653 				 xfer, seg);
2654 			/* advance index to start of next read URB. */
2655 			seg->isoc_frame_index += urb_frame_count;
2656 			++(wa->active_buf_in_urbs);
2657 			result = usb_submit_urb(urb, GFP_ATOMIC);
2658 			if (result < 0) {
2659 				--(wa->active_buf_in_urbs);
2660 				dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
2661 					result);
2662 				wa_reset_all(wa);
2663 			}
2664 			/*
2665 			 * If we are in this callback and
2666 			 * isoc_data_frame_count > 0, it means that the dti_urb
2667 			 * submission was delayed in wa_dti_cb.  Once
2668 			 * we submit the last buf_in_urb, we can submit the
2669 			 * delayed dti_urb.
2670 			 */
2671 			resubmit_dti = (isoc_data_frame_count ==
2672 							urb_frame_count);
2673 		} else if (active_buf_in_urbs == 0) {
2674 			dev_dbg(dev,
2675 				"xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
2676 				xfer, wa_xfer_id(xfer), seg->index,
2677 				seg->result);
2678 			rpipe_ready = rpipe_avail_inc(rpipe);
2679 			done = __wa_xfer_mark_seg_as_done(xfer, seg,
2680 					WA_SEG_DONE);
2681 		}
2682 		spin_unlock_irqrestore(&xfer->lock, flags);
2683 		if (done)
2684 			wa_xfer_completion(xfer);
2685 		if (rpipe_ready)
2686 			wa_xfer_delayed_run(rpipe);
2687 		break;
2688 	case -ECONNRESET:	/* URB unlinked; no need to do anything */
2689 	case -ENOENT:		/* as it was done by whoever unlinked us */
2690 		break;
2691 	default:		/* Other errors ... */
2692 		/*
2693 		 * Error on data buf read.  Only resubmit DTI if it hasn't
2694 		 * already been done by previously hitting this error or by a
2695 		 * successful completion of the previous buf_in_urb.
2696 		 */
2697 		resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
2698 		spin_lock_irqsave(&xfer->lock, flags);
2699 		if (printk_ratelimit())
2700 			dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
2701 				xfer, wa_xfer_id(xfer), seg->index,
2702 				urb->status);
2703 		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
2704 			    EDC_ERROR_TIMEFRAME)){
2705 			dev_err(dev, "DTO: URB max acceptable errors "
2706 				"exceeded, resetting device\n");
2707 			wa_reset_all(wa);
2708 		}
2709 		seg->result = urb->status;
2710 		rpipe_ready = rpipe_avail_inc(rpipe);
2711 		if (active_buf_in_urbs == 0)
2712 			done = __wa_xfer_mark_seg_as_done(xfer, seg,
2713 				WA_SEG_ERROR);
2714 		else
2715 			__wa_xfer_abort(xfer);
2716 		spin_unlock_irqrestore(&xfer->lock, flags);
2717 		if (done)
2718 			wa_xfer_completion(xfer);
2719 		if (rpipe_ready)
2720 			wa_xfer_delayed_run(rpipe);
2721 	}
2722 
2723 	if (resubmit_dti) {
2724 		int result;
2725 
2726 		wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
2727 
2728 		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
2729 		if (result < 0) {
2730 			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
2731 				result);
2732 			wa_reset_all(wa);
2733 		}
2734 	}
2735 }
2736 
2737 /*
2738  * Handle an incoming transfer result buffer
2739  *
2740  * Given a transfer result buffer, it completes the transfer (possibly
2741  * scheduling a buffer-in read) and then resubmits the DTI URB for a
2742  * new transfer result read.
2743  *
2744  *
2745  * The xfer_result DTI URB state machine
2746  *
2747  * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
2748  *
2749  * We start in OFF mode, the first xfer_result notification [through
2750  * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
2751  * read.
2752  *
2753  * We receive a buffer -- if it is not a xfer_result, we complain and
2754  * repost the DTI-URB. If it is a xfer_result then do the xfer seg
2755  * request accounting. If it is an IN segment, we move to RBI and post
2756  * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
2757  * repost the DTI-URB and move to RXR state. If there was no IN
2758  * segment, it will repost the DTI-URB.
2759  *
2760  * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
2761  * errors) in the URBs.
2762  */
2763 static void wa_dti_cb(struct urb *urb)
2764 {
2765 	int result, dti_busy = 0;
2766 	struct wahc *wa = urb->context;
2767 	struct device *dev = &wa->usb_iface->dev;
2768 	u32 xfer_id;
2769 	u8 usb_status;
2770 
2771 	BUG_ON(wa->dti_urb != urb);
2772 	switch (wa->dti_urb->status) {
2773 	case 0:
2774 		if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
2775 			struct wa_xfer_result *xfer_result;
2776 			struct wa_xfer *xfer;
2777 
2778 			/* We have a xfer result buffer; check it */
2779 			dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
2780 				urb->actual_length, urb->transfer_buffer);
2781 			if (urb->actual_length != sizeof(*xfer_result)) {
2782 				dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
2783 					urb->actual_length,
2784 					sizeof(*xfer_result));
2785 				break;
2786 			}
2787 			xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
2788 			if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
2789 				dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
2790 					xfer_result->hdr.bLength);
2791 				break;
2792 			}
2793 			if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
2794 				dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
2795 					xfer_result->hdr.bNotifyType);
2796 				break;
2797 			}
2798 			xfer_id = le32_to_cpu(xfer_result->dwTransferID);
2799 			usb_status = xfer_result->bTransferStatus & 0x3f;
2800 			if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
2801 				/* taken care of already */
2802 				dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
2803 					__func__, xfer_id,
2804 					xfer_result->bTransferSegment & 0x7f);
2805 				break;
2806 			}
2807 			xfer = wa_xfer_get_by_id(wa, xfer_id);
2808 			if (xfer == NULL) {
2809 				/* FIXME: transaction not found. */
2810 				dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
2811 					xfer_id, usb_status);
2812 				break;
2813 			}
2814 			wa_xfer_result_chew(wa, xfer, xfer_result);
2815 			wa_xfer_put(xfer);
2816 		} else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
2817 			dti_busy = wa_process_iso_packet_status(wa, urb);
2818 		} else {
2819 			dev_err(dev, "DTI Error: unexpected EP state = %d\n",
2820 				wa->dti_state);
2821 		}
2822 		break;
2823 	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
2824 	case -ESHUTDOWN:	/* going away! */
2825 		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
2826 		goto out;
2827 	default:
2828 		/* Unknown error */
2829 		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
2830 			    EDC_ERROR_TIMEFRAME)) {
2831 			dev_err(dev, "DTI: URB max acceptable errors "
2832 				"exceeded, resetting device\n");
2833 			wa_reset_all(wa);
2834 			goto out;
2835 		}
2836 		if (printk_ratelimit())
2837 			dev_err(dev, "DTI: URB error %d\n", urb->status);
2838 		break;
2839 	}
2840 
2841 	/* Resubmit the DTI URB if we are not busy processing isoc in frames. */
2842 	if (!dti_busy) {
2843 		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
2844 		if (result < 0) {
2845 			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
2846 				result);
2847 			wa_reset_all(wa);
2848 		}
2849 	}
2850 out:
2851 	return;
2852 }
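
/*
 * Condensed view of the DTI state handling driven by wa_dti_cb() above
 * (state names are the ones used in the code):
 *
 *	WA_DTI_TRANSFER_RESULT_PENDING
 *	  -- non-isoc IN result --> submit buf_in URB(s); wa_buf_in_cb()
 *				    reposts the DTI URB when done
 *	  -- isoc result ---------> WA_DTI_ISOC_PACKET_STATUS_PENDING
 *	WA_DTI_ISOC_PACKET_STATUS_PENDING
 *	  -- more IN data --------> WA_DTI_BUF_IN_DATA_PENDING
 *	  -- no IN data ----------> WA_DTI_TRANSFER_RESULT_PENDING
 */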
2853 
2854 /*
2855  * Initialize the DTI URB for reading transfer result notifications and also
2856  * the buffer-in URBs, for reading buffers. Then we just submit the DTI URB.
2857  */
2858 int wa_dti_start(struct wahc *wa)
2859 {
2860 	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
2861 	struct device *dev = &wa->usb_iface->dev;
2862 	int result = -ENOMEM, index;
2863 
2864 	if (wa->dti_urb != NULL)	/* DTI URB already started */
2865 		goto out;
2866 
2867 	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
2868 	if (wa->dti_urb == NULL) {
2869 		dev_err(dev, "Can't allocate DTI URB\n");
2870 		goto error_dti_urb_alloc;
2871 	}
2872 	usb_fill_bulk_urb(
2873 		wa->dti_urb, wa->usb_dev,
2874 		usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
2875 		wa->dti_buf, wa->dti_buf_size,
2876 		wa_dti_cb, wa);
2877 
2878 	/* init the buf in URBs */
2879 	for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
2880 		usb_fill_bulk_urb(
2881 			&(wa->buf_in_urbs[index]), wa->usb_dev,
2882 			usb_rcvbulkpipe(wa->usb_dev,
2883 				0x80 | dti_epd->bEndpointAddress),
2884 			NULL, 0, wa_buf_in_cb, wa);
2885 	}
2886 	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
2887 	if (result < 0) {
2888 		dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
2889 			result);
2890 		goto error_dti_urb_submit;
2891 	}
2892 out:
2893 	return 0;
2894 
2895 error_dti_urb_submit:
2896 	usb_put_urb(wa->dti_urb);
2897 	wa->dti_urb = NULL;
2898 error_dti_urb_alloc:
2899 	return result;
2900 }
2901 EXPORT_SYMBOL_GPL(wa_dti_start);
2902 /*
2903  * Transfer complete notification
2904  *
2905  * Called from the notif.c code. We get a notification on EP2 saying
2906  * that some endpoint has some transfer result data available. We are
2907  * about to read it.
2908  *
2909  * To speed things up, we always have a URB reading the DTI endpoint;
2910  * we don't really set it up and start it until the first xfer complete
2911  * notification arrives, which is what we do here.
2912  *
2913  * Follow up in wa_dti_cb(), as that's where the whole state
2914  * machine starts.
2915  *
2916  * @wa shall be referenced
2917  */
2918 void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
2919 {
2920 	struct device *dev = &wa->usb_iface->dev;
2921 	struct wa_notif_xfer *notif_xfer;
2922 	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
2923 
2924 	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
2925 	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
2926 
2927 	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
2928 		/* FIXME: hardcoded limitation, adapt */
2929 		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
2930 			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
2931 		goto error;
2932 	}
2933 
2934 	/* attempt to start the DTI ep processing. */
2935 	if (wa_dti_start(wa) < 0)
2936 		goto error;
2937 
2938 	return;
2939 
2940 error:
2941 	wa_reset_all(wa);
2942 }
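
/*
 * Illustrative call chain (the notification side lives in the notif.c
 * code, as noted above): a WA_NOTIF_TRANSFER notification arriving on the
 * NEP is expected to end up here roughly as
 *
 *	wa_handle_notif_xfer(wa, notif_hdr);
 *	  -> wa_dti_start(wa);				(first time only)
 *	       -> usb_submit_urb(wa->dti_urb, GFP_KERNEL);
 *
 * after which wa_dti_cb() drives the RXR/RBI state machine for every
 * subsequent transfer result.
 */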
2943