/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue quick.
 */

static int ehci_get_frame (struct usb_hcd *hcd);

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
	// case Q_TYPE_SITD:
	default:
		return &periodic->sitd->sitd_next;
	}
}

static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	/* our ehci_shadow.qh is actually software part */
	case Q_TYPE_QH:
		return &periodic->qh->hw->hw_next;
	/* others are hw parts */
	default:
		return periodic->hw_next;
	}
}

/* caller must hold ehci->lock */
static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow[frame];
	__hc32			*hw_p = &ehci->periodic[frame];
	union ehci_shadow	here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow(ehci, prev_p,
				Q_NEXT_TYPE(ehci, *hw_p));
		hw_p = shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow(ehci, &here,
			Q_NEXT_TYPE(ehci, *hw_p));

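	/*
	 * Some controllers cache/prefetch the periodic schedule.  For
	 * those (ehci->use_dummy_qh), an emptied frame-list slot must
	 * point at a dummy QH rather than be marked as the list end.
	 */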
	if (!ehci->use_dummy_qh ||
	    *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
			!= EHCI_LIST_END(ehci))
		*hw_p = *shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
	else
		*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
}

/*-------------------------------------------------------------------------*/

/* Bandwidth and TT management */

/* Find the TT data structure for this device; create it if necessary */
static struct ehci_tt *find_tt(struct usb_device *udev)
{
	struct usb_tt		*utt = udev->tt;
	struct ehci_tt		*tt, **tt_index, **ptt;
	unsigned		port;
	bool			allocated_index = false;

	if (!utt)
		return NULL;		/* Not below a TT */

	/*
	 * Find/create our data structure.
	 * For hubs with a single TT, we get it directly.
	 * For hubs with multiple TTs, there's an extra level of pointers.
	 */
	tt_index = NULL;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		if (!tt_index) {		/* Create the index array */
			tt_index = kzalloc(utt->hub->maxchild *
					sizeof(*tt_index), GFP_ATOMIC);
			if (!tt_index)
				return ERR_PTR(-ENOMEM);
			utt->hcpriv = tt_index;
			allocated_index = true;
		}
		port = udev->ttport - 1;
		ptt = &tt_index[port];
	} else {
		port = 0;
		ptt = (struct ehci_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt) {				/* Create the ehci_tt */
		struct ehci_hcd		*ehci =
				hcd_to_ehci(bus_to_hcd(udev->bus));

		tt = kzalloc(sizeof(*tt), GFP_ATOMIC);
		if (!tt) {
			if (allocated_index) {
				utt->hcpriv = NULL;
				kfree(tt_index);
			}
			return ERR_PTR(-ENOMEM);
		}
		list_add_tail(&tt->tt_list, &ehci->tt_list);
		INIT_LIST_HEAD(&tt->ps_list);
		tt->usb_tt = utt;
		tt->tt_port = port;
		*ptt = tt;
	}

	return tt;
}

/* Release the TT above udev, if it's not in use */
static void drop_tt(struct usb_device *udev)
{
	struct usb_tt		*utt = udev->tt;
	struct ehci_tt		*tt, **tt_index, **ptt;
	int			cnt, i;

	if (!utt || !utt->hcpriv)
		return;		/* Not below a TT, or never allocated */

	cnt = 0;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		ptt = &tt_index[udev->ttport - 1];

		/* How many entries are left in tt_index? */
		for (i = 0; i < utt->hub->maxchild; ++i)
			cnt += !!tt_index[i];
	} else {
		tt_index = NULL;
		ptt = (struct ehci_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt || !list_empty(&tt->ps_list))
		return;		/* never allocated, or still in use */

	list_del(&tt->tt_list);
	*ptt = NULL;
	kfree(tt);
	if (cnt == 1) {
		utt->hcpriv = NULL;
		kfree(tt_index);
	}
}

static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
		struct ehci_per_sched *ps)
{
	dev_dbg(&ps->udev->dev,
			"ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
			ps->ep->desc.bEndpointAddress,
			(sign >= 0 ? "reserve" : "release"), type,
			(ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
			ps->phase, ps->phase_uf, ps->period,
			ps->usecs, ps->c_usecs, ps->cs_mask);
}

static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
		struct ehci_qh *qh, int sign)
{
	unsigned		start_uf;
	unsigned		i, j, m;
	int			usecs = qh->ps.usecs;
	int			c_usecs = qh->ps.c_usecs;
	int			tt_usecs = qh->ps.tt_usecs;
	struct ehci_tt		*tt;

	if (qh->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
		return;
	start_uf = qh->ps.bw_phase << 3;

	bandwidth_dbg(ehci, sign, "intr", &qh->ps);

	if (sign < 0) {		/* Release bandwidth */
		usecs = -usecs;
		c_usecs = -c_usecs;
		tt_usecs = -tt_usecs;
	}

	/* Entire transaction (high speed) or start-split (full/low speed) */
	for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
			i += qh->ps.bw_uperiod)
		ehci->bandwidth[i] += usecs;

	/* Complete-split (full/low speed) */
	if (qh->ps.c_usecs) {
		/* NOTE: adjustments needed for FSTN */
		for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
				i += qh->ps.bw_uperiod) {
			for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
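				/* the C-mask lives in cs_mask bits 8..15:
				 * bit (j+8) set means a CSPLIT in uframe j
				 */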
				if (qh->ps.cs_mask & m)
					ehci->bandwidth[i+j] += c_usecs;
			}
		}
	}

	/* FS/LS bus bandwidth */
	if (tt_usecs) {
		tt = find_tt(qh->ps.udev);
		if (sign > 0)
			list_add_tail(&qh->ps.ps_list, &tt->ps_list);
		else
			list_del(&qh->ps.ps_list);

		for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES;
				i += qh->ps.bw_period)
			tt->bandwidth[i] += tt_usecs;
	}
}

/*-------------------------------------------------------------------------*/

static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
		struct ehci_tt *tt)
{
	struct ehci_per_sched	*ps;
	unsigned		uframe, uf, x;
	u8			*budget_line;

	if (!tt)
		return;
	memset(budget_table, 0, EHCI_BANDWIDTH_SIZE);

	/* Add up the contributions from all the endpoints using this TT */
	list_for_each_entry(ps, &tt->ps_list, ps_list) {
		for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE;
				uframe += ps->bw_uperiod) {
			budget_line = &budget_table[uframe];
			x = ps->tt_usecs;

			/* propagate the time forward */
			for (uf = ps->phase_uf; uf < 8; ++uf) {
				x += budget_line[uf];

				/* Each microframe lasts 125 us */
				if (x <= 125) {
					budget_line[uf] = x;
					break;
				} else {
					budget_line[uf] = 125;
					x -= 125;
				}
			}
		}
	}
}

static int __maybe_unused same_tt(struct usb_device *dev1,
		struct usb_device *dev2)
{
	if (!dev1->tt || !dev2->tt)
		return 0;
	if (dev1->tt != dev2->tt)
		return 0;
	if (dev1->tt->multi)
		return dev1->ttport == dev2->ttport;
	else
		return 1;
}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED

/* Which uframe does the low/fullspeed transfer start in?
 *
 * The parameter is the mask of ssplits in "H-frame" terms
 * and this returns the transfer start uframe in "B-frame" terms,
 * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
 * will cause a transfer in "B-frame" uframe 0.  "B-frames" lag
 * "H-frames" by 1 uframe.  See the EHCI spec sec 4.5 and figure 4.7.
 */
static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
{
	unsigned char smask = QH_SMASK & hc32_to_cpu(ehci, mask);
	if (!smask) {
		ehci_err(ehci, "invalid empty smask!\n");
		/* uframe 7 can't have bw so this will indicate failure */
		return 7;
	}
	return ffs(smask) - 1;
}

static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };

/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
	int i;
	for (i=0; i<7; i++) {
		if (max_tt_usecs[i] < tt_usecs[i]) {
			tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
			tt_usecs[i] = max_tt_usecs[i];
		}
	}
}

/*
 * Return true if the device's tt's downstream bus is available for a
 * periodic transfer of the specified length (usecs), starting at the
 * specified frame/uframe.  Note that (as summarized in section 11.19
 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
 * uframe.
 *
 * The uframe parameter is when the fullspeed/lowspeed transfer
 * should be executed in "B-frame" terms, which is the same as the
 * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
 * See the EHCI spec sec 4.5 and fig 4.7.
 *
 * This checks if the full/lowspeed bus, at the specified starting uframe,
 * has the specified bandwidth available, according to rules listed
 * in USB 2.0 spec section 11.18.1 fig 11-60.
 *
 * This does not check if the transfer would exceed the max ssplit
 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
 * since proper scheduling limits ssplits to less than 16 per uframe.
 */
static int tt_available (
	struct ehci_hcd		*ehci,
	struct ehci_per_sched	*ps,
	struct ehci_tt		*tt,
	unsigned		frame,
	unsigned		uframe
)
{
	unsigned		period = ps->bw_period;
	unsigned		usecs = ps->tt_usecs;

	if ((period == 0) || (uframe >= 7))	/* error */
		return 0;

	for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES;
			frame += period) {
		unsigned	i, uf;
		unsigned short	tt_usecs[8];

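		/* periodic transfers may claim at most 90% of the
		 * full/low speed frame: 900 out of 1000 usecs
		 */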
		if (tt->bandwidth[frame] + usecs > 900)
			return 0;

		uf = frame << 3;
		for (i = 0; i < 8; (++i, ++uf))
			tt_usecs[i] = ehci->tt_budget[uf];

		if (max_tt_usecs[uframe] <= tt_usecs[uframe])
			return 0;

		/* special case for isoc transfers larger than 125us:
		 * the first and each subsequent fully used uframe
		 * must be empty, so as to not illegally delay
		 * already scheduled transactions
		 */
		if (125 < usecs) {
			int ufs = (usecs / 125);

			for (i = uframe; i < (uframe + ufs) && i < 8; i++)
				if (0 < tt_usecs[i])
					return 0;
		}

		tt_usecs[uframe] += usecs;

		carryover_tt_bandwidth(tt_usecs);

		/* fail if the carryover pushed bw past the last uframe's limit */
		if (max_tt_usecs[7] < tt_usecs[7])
			return 0;
	}

	return 1;
}

#else

/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision (
	struct ehci_hcd		*ehci,
	unsigned		period,
	struct usb_device	*dev,
	unsigned		frame,
	u32			uf_mask
)
{
	if (period == 0)	/* error */
		return 0;

	/* note bandwidth wastage:  split never follows csplit
	 * (different dev or endpoint) until the next uframe.
	 * calling convention doesn't make that distinction.
	 */
	for (; frame < ehci->periodic_size; frame += period) {
		union ehci_shadow	here;
		__hc32			type;
		struct ehci_qh_hw	*hw;

		here = ehci->pshadow [frame];
		type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]);
		while (here.ptr) {
			switch (hc32_to_cpu(ehci, type)) {
			case Q_TYPE_ITD:
				type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
				here = here.itd->itd_next;
				continue;
			case Q_TYPE_QH:
				hw = here.qh->hw;
				if (same_tt(dev, here.qh->ps.udev)) {
					u32		mask;

					mask = hc32_to_cpu(ehci,
							hw->hw_info2);
					/* "knows" no gap is needed */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, hw->hw_next);
				here = here.qh->qh_next;
				continue;
			case Q_TYPE_SITD:
				if (same_tt (dev, here.sitd->urb->dev)) {
					u16		mask;

					mask = hc32_to_cpu(ehci, here.sitd
								->hw_uframe);
					/* FIXME assumes no gap for IN! */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
				here = here.sitd->sitd_next;
				continue;
			// case Q_TYPE_FSTN:
			default:
				ehci_dbg (ehci,
					"periodic frame %d bogus type %d\n",
					frame, type);
			}

			/* collision or error */
			return 0;
		}
	}

	/* no collision */
	return 1;
}

#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */

/*-------------------------------------------------------------------------*/

static void enable_periodic(struct ehci_hcd *ehci)
{
	if (ehci->periodic_count++)
		return;

	/* Stop waiting to turn off the periodic schedule */
	ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);

	/* Don't start the schedule until PSS is 0 */
	ehci_poll_PSS(ehci);
	turn_on_io_watchdog(ehci);
}

static void disable_periodic(struct ehci_hcd *ehci)
{
	if (--ehci->periodic_count)
		return;

	/* Don't turn off the schedule until PSS is 1 */
	ehci_poll_PSS(ehci);
}

/*-------------------------------------------------------------------------*/

/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; ehci 0.96+)
 */
static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period = qh->ps.period;

	dev_dbg(&qh->ps.udev->dev,
		"link qh%d-%04x/%p start %d [%d/%d us]\n",
		period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
			& (QH_CMASK | QH_SMASK),
		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;

	for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
		union ehci_shadow	*prev = &ehci->pshadow[i];
		__hc32			*hw_p = &ehci->periodic[i];
		union ehci_shadow	here = *prev;
		__hc32			type = 0;

		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE(ehci, *hw_p);
			if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
				break;
			prev = periodic_next_shadow(ehci, prev, type);
			hw_p = shadow_next_periodic(ehci, &here, type);
			here = *prev;
		}

		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->ps.period > here.qh->ps.period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw->hw_next = *hw_p;
			wmb ();
			prev->qh = qh;
			*hw_p = QH_NEXT (ehci, qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;
	qh->xacterrs = 0;
	qh->exception = 0;

	/* update per-qh bandwidth for debugfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
		: (qh->ps.usecs * 8);

	list_add(&qh->intr_node, &ehci->intr_qh_list);

	/* maybe enable periodic schedule processing */
	++ehci->intr_count;
	enable_periodic(ehci);
}

static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period;

	/*
	 * If qh is for a low/full-speed device, simply unlinking it
	 * could interfere with an ongoing split transaction.  To unlink
	 * it safely would require setting the QH_INACTIVATE bit and
	 * waiting at least one frame, as described in EHCI 4.12.2.5.
	 *
	 * We won't bother with any of this.  Instead, we assume that the
	 * only reason for unlinking an interrupt QH while the current URB
	 * is still active is to dequeue all the URBs (flush the whole
	 * endpoint queue).
	 *
	 * If rebalancing the periodic schedule is ever implemented, this
	 * approach will no longer be valid.
	 */

	/* high bandwidth, or otherwise part of every microframe */
	period = qh->ps.period ? : 1;

	for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
		periodic_unlink (ehci, i, qh);

	/* update per-qh bandwidth for debugfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
		: (qh->ps.usecs * 8);

	dev_dbg(&qh->ps.udev->dev,
		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
		qh->ps.period,
		hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);

	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;

	if (ehci->qh_scan_next == qh)
		ehci->qh_scan_next = list_entry(qh->intr_node.next,
				struct ehci_qh, intr_node);
	list_del(&qh->intr_node);
}

static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	if (qh->qh_state != QH_STATE_LINKED ||
			list_empty(&qh->unlink_node))
		return;

	list_del_init(&qh->unlink_node);

	/*
	 * TODO: disable the event of EHCI_HRTIMER_START_UNLINK_INTR for
	 * avoiding unnecessary CPU wakeup
	 */
}

static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* If the QH isn't linked then there's nothing we can do. */
	if (qh->qh_state != QH_STATE_LINKED)
		return;

	/* if the qh is waiting for unlink, cancel it now */
	cancel_unlink_wait_intr(ehci, qh);

	qh_unlink_periodic (ehci, qh);

	/* Make sure the unlinks are visible before starting the timer */
	wmb();

	/*
	 * The EHCI spec doesn't say how long it takes the controller to
	 * stop accessing an unlinked interrupt QH.  The timer delay is
	 * 9 uframes; presumably that will be long enough.
	 */
	qh->unlink_cycle = ehci->intr_unlink_cycle;

	/* New entries go at the end of the intr_unlink list */
	list_add_tail(&qh->unlink_node, &ehci->intr_unlink);

	if (ehci->intr_unlinking)
		;	/* Avoid recursive calls */
	else if (ehci->rh_state < EHCI_RH_RUNNING)
		ehci_handle_intr_unlinks(ehci);
	else if (ehci->intr_unlink.next == &qh->unlink_node) {
		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
		++ehci->intr_unlink_cycle;
	}
}

/*
 * It is common for only one intr URB to be scheduled on a qh, and
 * since complete() is run in tasklet context, introduce a short
 * delay to avoid unlinking the qh too early.
 */
static void start_unlink_intr_wait(struct ehci_hcd *ehci,
				   struct ehci_qh *qh)
{
	qh->unlink_cycle = ehci->intr_unlink_wait_cycle;

	/* New entries go at the end of the intr_unlink_wait list */
	list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);

	if (ehci->rh_state < EHCI_RH_RUNNING)
		ehci_handle_start_intr_unlinks(ehci);
	else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
		ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
		++ehci->intr_unlink_wait_cycle;
	}
}

static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qh_hw	*hw = qh->hw;
	int			rc;

	qh->qh_state = QH_STATE_IDLE;
	hw->hw_next = EHCI_LIST_END(ehci);

	if (!list_empty(&qh->qtd_list))
		qh_completions(ehci, qh);

	/* reschedule QH iff another request is queued */
	if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
		rc = qh_schedule(ehci, qh);
		if (rc == 0) {
			qh_refresh(ehci, qh);
			qh_link_periodic(ehci, qh);
		}

		/* An error here likely indicates handshake failure
		 * or no space left in the schedule.  Neither fault
		 * should happen often ...
		 *
		 * FIXME kill the now-dysfunctional queued urbs
		 */
		else {
			ehci_err(ehci, "can't reschedule qh %p, err %d\n",
					qh, rc);
		}
	}

	/* maybe turn off periodic schedule */
	--ehci->intr_count;
	disable_periodic(ehci);
}

/*-------------------------------------------------------------------------*/

static int check_period (
	struct ehci_hcd *ehci,
	unsigned	frame,
	unsigned	uframe,
	unsigned	uperiod,
	unsigned	usecs
) {
	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/* convert "usecs we need" to "max already claimed" */
	usecs = ehci->uframe_periodic_max - usecs;

	for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE;
			uframe += uperiod) {
		if (ehci->bandwidth[uframe] > usecs)
			return 0;
	}

	// success!
	return 1;
}

static int check_intr_schedule (
	struct ehci_hcd		*ehci,
	unsigned		frame,
	unsigned		uframe,
	struct ehci_qh		*qh,
	unsigned		*c_maskp,
	struct ehci_tt		*tt
)
{
	int		retval = -ENOSPC;
	u8		mask = 0;

	if (qh->ps.c_usecs && uframe >= 6)	/* FSTN territory? */
		goto done;

	if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
		goto done;
	if (!qh->ps.c_usecs) {
		retval = 0;
		*c_maskp = 0;
		goto done;
	}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
		unsigned i;

		/* TODO : this may need FSTN for SSPLIT in uframe 5. */
		for (i = uframe+2; i < 8 && i <= uframe+4; i++)
			if (!check_period(ehci, frame, i,
					qh->ps.bw_uperiod, qh->ps.c_usecs))
				goto done;
			else
				mask |= 1 << i;

		retval = 0;

		*c_maskp = mask;
	}
#else
	/* Make sure this tt's buffer is also available for CSPLITs.
	 * We pessimize a bit; probably the typical full speed case
	 * doesn't need the second CSPLIT.
	 *
	 * NOTE:  both SPLIT and CSPLIT could be checked in just
	 * one smart pass...
	 */
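	/* schedule a CSPLIT in each of the two uframes after the gap */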
	mask = 0x03 << (uframe + qh->gap_uf);
	*c_maskp = mask;

	mask |= 1 << uframe;
	if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
		if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
				qh->ps.bw_uperiod, qh->ps.c_usecs))
			goto done;
		if (!check_period(ehci, frame, uframe + qh->gap_uf,
				qh->ps.bw_uperiod, qh->ps.c_usecs))
			goto done;
		retval = 0;
	}
#endif
done:
	return retval;
}

/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		status = 0;
	unsigned	uframe;
	unsigned	c_mask;
	struct ehci_qh_hw	*hw = qh->hw;
	struct ehci_tt		*tt;

	hw->hw_next = EHCI_LIST_END(ehci);

	/* reuse the previous schedule slots, if we can */
	if (qh->ps.phase != NO_FRAME) {
		ehci_dbg(ehci, "reused qh %p schedule\n", qh);
		return 0;
	}

	uframe = 0;
	c_mask = 0;
	tt = find_tt(qh->ps.udev);
	if (IS_ERR(tt)) {
		status = PTR_ERR(tt);
		goto done;
	}
	compute_tt_budget(ehci->tt_budget, tt);

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	/* "normal" case, uframing flexible except with splits */
	if (qh->ps.bw_period) {
		int		i;
		unsigned	frame;

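		/* try each frame in the period, starting at a
		 * pseudo-random offset to spread out the load
		 */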
		for (i = qh->ps.bw_period; i > 0; --i) {
			frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
			for (uframe = 0; uframe < 8; uframe++) {
				status = check_intr_schedule(ehci,
						frame, uframe, qh, &c_mask, tt);
				if (status == 0)
					goto got_it;
			}
		}

	/* qh->ps.bw_period == 0 means every uframe */
	} else {
		status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
	}
	if (status)
		goto done;

 got_it:
	qh->ps.phase = (qh->ps.period ? ehci->random_frame &
			(qh->ps.period - 1) : 0);
	qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
	qh->ps.phase_uf = uframe;
	qh->ps.cs_mask = qh->ps.period ?
			(c_mask << 8) | (1 << uframe) :
			QH_SMASK;

	/* reset S-frame and (maybe) C-frame masks */
	hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
	hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
	reserve_release_intr_bandwidth(ehci, qh, 1);

done:
	return status;
}

static int intr_submit (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	unsigned		epnum;
	unsigned long		flags;
	struct ehci_qh		*qh;
	int			status;
	struct list_head	empty;

	/* get endpoint and transfer/schedule data */
	epnum = urb->ep->desc.bEndpointAddress;

	spin_lock_irqsave (&ehci->lock, flags);

	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD (&empty);
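	/* appending an empty qtd list just looks up (or creates) the qh
	 * without queueing any qtds, so scheduling errors show up before
	 * the real transfer is committed
	 */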
	qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
	if (qh == NULL) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		if ((status = qh_schedule (ehci, qh)) != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	BUG_ON (qh == NULL);

	/* stuff into the periodic schedule */
	if (qh->qh_state == QH_STATE_IDLE) {
		qh_refresh(ehci, qh);
		qh_link_periodic(ehci, qh);
	} else {
		/* cancel unlink wait for the qh */
		cancel_unlink_wait_intr(ehci, qh);
	}

	/* ... update usbfs periodic stats */
	ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;

done:
	if (unlikely(status))
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status)
		qtd_list_free (ehci, urb, qtd_list);

	return status;
}

static void scan_intr(struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;

	list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
			intr_node) {

		/* clean any finished work for this qh */
		if (!list_empty(&qh->qtd_list)) {
			int temp;

			/*
			 * Unlinks could happen here; completion reporting
			 * drops the lock.  That's why ehci->qh_scan_next
			 * always holds the next qh to scan; if the next qh
			 * gets unlinked then ehci->qh_scan_next is adjusted
			 * in qh_unlink_periodic().
			 */
			temp = qh_completions(ehci, qh);
			if (unlikely(temp))
				start_unlink_intr(ehci, qh);
			else if (unlikely(list_empty(&qh->qtd_list) &&
					qh->qh_state == QH_STATE_LINKED))
				start_unlink_intr_wait(ehci, qh);
		}
	}
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_stream ops work with both ITD and SITD */

static struct ehci_iso_stream *
iso_stream_alloc (gfp_t mem_flags)
{
	struct ehci_iso_stream *stream;

	stream = kzalloc(sizeof *stream, mem_flags);
	if (likely (stream != NULL)) {
		INIT_LIST_HEAD(&stream->td_list);
		INIT_LIST_HEAD(&stream->free_list);
		stream->next_uframe = NO_FRAME;
		stream->ps.phase = NO_FRAME;
	}
	return stream;
}

static void
iso_stream_init (
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

	struct usb_device	*dev = urb->dev;
	u32			buf1;
	unsigned		epnum, maxp;
	int			is_input;
	unsigned		tmp;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacketSize field
	 */
	epnum = usb_pipeendpoint(urb->pipe);
	is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
	maxp = usb_endpoint_maxp(&urb->ep->desc);
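	/* iTD buffer-pointer word 1: bit 11 is the I/O direction
	 * flag (1 == IN)
	 */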
	if (is_input) {
		buf1 = (1 << 11);
	} else {
		buf1 = 0;
	}

	/* knows about ITD vs SITD */
	if (dev->speed == USB_SPEED_HIGH) {
		unsigned multi = hb_mult(maxp);

		stream->highspeed = 1;

		maxp = max_packet(maxp);
		buf1 |= maxp;
		maxp *= multi;

		stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
		stream->buf1 = cpu_to_hc32(ehci, buf1);
		stream->buf2 = cpu_to_hc32(ehci, multi);

		/* usbfs wants to report the average usecs per frame tied up
		 * when transfers on this endpoint are scheduled ...
		 */
		stream->ps.usecs = HS_USECS_ISO(maxp);

		/* period for bandwidth allocation */
		tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
				1 << (urb->ep->desc.bInterval - 1));

		/* Allow urb->interval to override */
		stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);

		stream->uperiod = urb->interval;
		stream->ps.period = urb->interval >> 3;
		stream->bandwidth = stream->ps.usecs * 8 /
				stream->ps.bw_uperiod;

	} else {
		u32		addr;
		int		think_time;
		int		hs_transfers;

		addr = dev->ttport << 24;
		if (!ehci_is_TDI(ehci)
				|| (dev->tt->hub !=
					ehci_to_hcd(ehci)->self.root_hub))
			addr |= dev->tt->hub->devnum << 16;
		addr |= epnum << 8;
		addr |= dev->devnum;
		stream->ps.usecs = HS_USECS_ISO(maxp);
		think_time = dev->tt ? dev->tt->think_time : 0;
		stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
				dev->speed, is_input, 1, maxp));
		hs_transfers = max (1u, (maxp + 187) / 188);
		if (is_input) {
			u32	tmp;

			addr |= 1 << 31;
			stream->ps.c_usecs = stream->ps.usecs;
			stream->ps.usecs = HS_USECS_ISO(1);
			stream->ps.cs_mask = 1;

			/* c-mask as specified in USB 2.0 11.18.4 3.c */
			tmp = (1 << (hs_transfers + 2)) - 1;
			stream->ps.cs_mask |= tmp << (8 + 2);
		} else
			stream->ps.cs_mask = smask_out[hs_transfers - 1];

		/* period for bandwidth allocation */
		tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
				1 << (urb->ep->desc.bInterval - 1));

		/* Allow urb->interval to override */
		stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
		stream->ps.bw_uperiod = stream->ps.bw_period << 3;

		stream->ps.period = urb->interval;
		stream->uperiod = urb->interval << 3;
		stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
				stream->ps.bw_period;

		/* stream->splits gets created from cs_mask later */
		stream->address = cpu_to_hc32(ehci, addr);
	}

	stream->ps.udev = dev;
	stream->ps.ep = urb->ep;

	stream->bEndpointAddress = is_input | epnum;
	stream->maxp = maxp;
}

static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned		epnum;
	struct ehci_iso_stream	*stream;
	struct usb_host_endpoint *ep;
	unsigned long		flags;

	epnum = usb_pipeendpoint (urb->pipe);
	if (usb_pipein(urb->pipe))
		ep = urb->dev->ep_in[epnum];
	else
		ep = urb->dev->ep_out[epnum];

	spin_lock_irqsave (&ehci->lock, flags);
	stream = ep->hcpriv;

	if (unlikely (stream == NULL)) {
		stream = iso_stream_alloc(GFP_ATOMIC);
		if (likely (stream != NULL)) {
			ep->hcpriv = stream;
			iso_stream_init(ehci, stream, urb);
		}

	/* if dev->ep [epnum] is a QH, hw is set */
	} else if (unlikely (stream->hw != NULL)) {
		ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
			urb->dev->devpath, epnum,
			usb_pipein(urb->pipe) ? "in" : "out");
		stream = NULL;
	}

	spin_unlock_irqrestore (&ehci->lock, flags);
	return stream;
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_sched ops can be ITD-only or SITD-only */

static struct ehci_iso_sched *
iso_sched_alloc (unsigned packets, gfp_t mem_flags)
{
	struct ehci_iso_sched	*iso_sched;
	int			size = sizeof *iso_sched;

	size += packets * sizeof (struct ehci_iso_packet);
	iso_sched = kzalloc(size, mem_flags);
	if (likely (iso_sched != NULL)) {
		INIT_LIST_HEAD (&iso_sched->td_list);
	}
	return iso_sched;
}

static inline void
itd_sched_init(
	struct ehci_hcd		*ehci,
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many uframes are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->uperiod;

	/* figure out per-uframe itd fields that we'll need later
	 * when we fit new itds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet	*uframe = &iso_sched->packet [i];
		unsigned		length;
		dma_addr_t		buf;
		u32			trans;

		length = urb->iso_frame_desc [i].length;
		buf = dma + urb->iso_frame_desc [i].offset;

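		/* build the transaction word: active status, buffer
		 * offset within the page, optional IOC, and length
		 */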
		trans = EHCI_ISOC_ACTIVE;
		trans |= buf & 0x0fff;
		if (unlikely (((i + 1) == urb->number_of_packets))
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= EHCI_ITD_IOC;
		trans |= length << 16;
		uframe->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a uframe */
		uframe->bufp = (buf & ~(u64)0x0fff);
		buf += length;
		if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
			uframe->cross = 1;
	}
}

static void
iso_sched_free (
	struct ehci_iso_stream	*stream,
	struct ehci_iso_sched	*iso_sched
)
{
	if (!iso_sched)
		return;
	// caller must hold ehci->lock!
	list_splice (&iso_sched->td_list, &stream->free_list);
	kfree (iso_sched);
}

static int
itd_urb_transaction (
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			mem_flags
)
{
	struct ehci_itd		*itd;
	dma_addr_t		itd_dma;
	int			i;
	unsigned		num_itds;
	struct ehci_iso_sched	*sched;
	unsigned long		flags;

	sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
	if (unlikely (sched == NULL))
		return -ENOMEM;

	itd_sched_init(ehci, sched, stream, urb);

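	/* For intervals shorter than a frame, one iTD carries up to
	 * eight packets (one per uframe), so roughly one iTD per frame
	 * spanned is needed, plus one for partial-frame alignment.
	 * Longer intervals use a separate iTD for each packet.
	 */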
	if (urb->interval < 8)
		num_itds = 1 + (sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;

	/* allocate/init ITDs */
	spin_lock_irqsave (&ehci->lock, flags);
	for (i = 0; i < num_itds; i++) {

		/*
		 * Use iTDs from the free list, but not iTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			itd = list_first_entry(&stream->free_list,
					struct ehci_itd, itd_list);
			if (itd->frame == ehci->now_frame)
				goto alloc_itd;
			list_del (&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else {
 alloc_itd:
			spin_unlock_irqrestore (&ehci->lock, flags);
			itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
					&itd_dma);
			spin_lock_irqsave (&ehci->lock, flags);
			if (!itd) {
				iso_sched_free(stream, sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset (itd, 0, sizeof *itd);
		itd->itd_dma = itd_dma;
		itd->frame = NO_FRAME;
		list_add (&itd->itd_list, &sched->td_list);
	}
	spin_unlock_irqrestore (&ehci->lock, flags);

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = sched;
	urb->error_count = 0;
	return 0;
}

/*-------------------------------------------------------------------------*/

static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
		struct ehci_iso_stream *stream, int sign)
{
	unsigned		uframe;
	unsigned		i, j;
	unsigned		s_mask, c_mask, m;
	int			usecs = stream->ps.usecs;
	int			c_usecs = stream->ps.c_usecs;
	int			tt_usecs = stream->ps.tt_usecs;
	struct ehci_tt		*tt;

	if (stream->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
		return;
	uframe = stream->ps.bw_phase << 3;

	bandwidth_dbg(ehci, sign, "iso", &stream->ps);

	if (sign < 0) {		/* Release bandwidth */
		usecs = -usecs;
		c_usecs = -c_usecs;
		tt_usecs = -tt_usecs;
	}

	if (!stream->splits) {		/* High speed */
		for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
				i += stream->ps.bw_uperiod)
			ehci->bandwidth[i] += usecs;

	} else {			/* Full speed */
		s_mask = stream->ps.cs_mask;
		c_mask = s_mask >> 8;

		/* NOTE: adjustment needed for frame overflow */
		for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
				i += stream->ps.bw_uperiod) {
			for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
					(++j, m <<= 1)) {
				if (s_mask & m)
					ehci->bandwidth[i+j] += usecs;
				else if (c_mask & m)
					ehci->bandwidth[i+j] += c_usecs;
			}
		}

		tt = find_tt(stream->ps.udev);
		if (sign > 0)
			list_add_tail(&stream->ps.ps_list, &tt->ps_list);
		else
			list_del(&stream->ps.ps_list);

		for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES;
				i += stream->ps.bw_period)
			tt->bandwidth[i] += tt_usecs;
	}
}

static inline int
itd_slot_ok (
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	unsigned		uframe
)
{
	unsigned		usecs;

	/* convert "usecs we need" to "max already claimed" */
	usecs = ehci->uframe_periodic_max - stream->ps.usecs;

	for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
			uframe += stream->ps.bw_uperiod) {
		if (ehci->bandwidth[uframe] > usecs)
			return 0;
	}
	return 1;
}

static inline int
sitd_slot_ok (
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	unsigned		uframe,
	struct ehci_iso_sched	*sched,
	struct ehci_tt		*tt
)
{
	unsigned		mask, tmp;
	unsigned		frame, uf;

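	/* shift the stream's s/c-split mask pattern to the candidate
	 * uframe
	 */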
	mask = stream->ps.cs_mask << (uframe & 7);

	/* for OUT, don't wrap SSPLIT into H-microframe 7 */
	if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
		return 0;

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* check bandwidth */
	uframe &= stream->ps.bw_uperiod - 1;
	frame = uframe >> 3;

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	/* The tt's fullspeed bus bandwidth must be available.
	 * tt_available scheduling guarantees 10+% for control/bulk.
	 */
	uf = uframe & 7;
	if (!tt_available(ehci, &stream->ps, tt, frame, uf))
		return 0;
#else
	/* tt must be idle for start(s), any gap, and csplit.
	 * assume scheduling slop leaves 10+% for control/bulk.
	 */
	if (!tt_no_collision(ehci, stream->ps.bw_period,
			stream->ps.udev, frame, mask))
		return 0;
#endif

	do {
		unsigned	max_used;
		unsigned	i;

		/* check starts (OUT uses more than one) */
		uf = uframe;
		max_used = ehci->uframe_periodic_max - stream->ps.usecs;
		for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) {
			if (ehci->bandwidth[uf] > max_used)
				return 0;
		}

		/* for IN, check CSPLIT */
		if (stream->ps.c_usecs) {
			max_used = ehci->uframe_periodic_max -
					stream->ps.c_usecs;
			uf = uframe & ~7;
			tmp = 1 << (2+8);
			for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) {
				if ((stream->ps.cs_mask & tmp) == 0)
					continue;
				if (ehci->bandwidth[uf+i] > max_used)
					return 0;
			}
		}

		uframe += stream->ps.bw_uperiod;
	} while (uframe < EHCI_BANDWIDTH_SIZE);

	stream->ps.cs_mask <<= uframe & 7;
	stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
	return 1;
}

/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * of the transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */

static int
iso_stream_schedule (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct ehci_iso_stream	*stream
)
{
	u32			now, base, next, start, period, span, now2;
	u32			wrap = 0, skip = 0;
	int			status = 0;
	unsigned		mod = ehci->periodic_size << 3;
	struct ehci_iso_sched	*sched = urb->hcpriv;
	bool			empty = list_empty(&stream->td_list);
	bool			new_stream = false;

	period = stream->uperiod;
	span = sched->span;
	if (!stream->highspeed)
		span <<= 3;

	/* Start a new isochronous stream? */
	if (unlikely(empty && !hcd_periodic_completion_in_progress(
			ehci_to_hcd(ehci), urb->ep))) {

		/* Schedule the endpoint */
		if (stream->ps.phase == NO_FRAME) {
			int		done = 0;
			struct ehci_tt	*tt = find_tt(stream->ps.udev);

			if (IS_ERR(tt)) {
				status = PTR_ERR(tt);
				goto fail;
			}
			compute_tt_budget(ehci->tt_budget, tt);

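			/* pick a pseudo-random, frame-aligned starting
			 * slot within one period
			 */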
			start = ((-(++ehci->random_frame)) << 3) & (period - 1);

			/* find a uframe slot with enough bandwidth.
			 * Early uframes are more precious because full-speed
			 * iso IN transfers can't use late uframes,
			 * and therefore they should be allocated last.
			 */
			next = start;
			start += period;
			do {
				start--;
				/* check schedule: enough space? */
				if (stream->highspeed) {
					if (itd_slot_ok(ehci, stream, start))
						done = 1;
				} else {
					if ((start % 8) >= 6)
						continue;
					if (sitd_slot_ok(ehci, stream, start,
							sched, tt))
						done = 1;
				}
			} while (start > next && !done);

			/* no room in the schedule */
			if (!done) {
				ehci_dbg(ehci, "iso sched full %p", urb);
				status = -ENOSPC;
				goto fail;
			}
			stream->ps.phase = (start >> 3) &
					(stream->ps.period - 1);
			stream->ps.bw_phase = stream->ps.phase &
					(stream->ps.bw_period - 1);
			stream->ps.phase_uf = start & 7;
			reserve_release_iso_bandwidth(ehci, stream, 1);
		}

		/* New stream is already scheduled; use the upcoming slot */
		else {
			start = (stream->ps.phase << 3) + stream->ps.phase_uf;
		}

		stream->next_uframe = start;
		new_stream = true;
	}

	now = ehci_read_frame_index(ehci) & (mod - 1);

	/* Take the isochronous scheduling threshold into account */
	if (ehci->i_thresh)
		next = now + ehci->i_thresh;	/* uframe cache */
	else
		next = (now + 2 + 7) & ~0x07;	/* full frame cache */

	/* If needed, initialize last_iso_frame so that this URB will be seen */
	if (ehci->isoc_count == 0)
		ehci->last_iso_frame = now >> 3;

	/*
	 * Use ehci->last_iso_frame as the base.  There can't be any
	 * TDs scheduled for earlier than that.
	 */
	base = ehci->last_iso_frame << 3;
	next = (next - base) & (mod - 1);
	start = (stream->next_uframe - base) & (mod - 1);

	if (unlikely(new_stream))
		goto do_ASAP;

	/*
	 * Typical case: reuse current schedule, stream may still be active.
	 * Hopefully there are no gaps from the host falling behind
	 * (irq delays etc).  If there are, the behavior depends on
	 * whether URB_ISO_ASAP is set.
	 */
	now2 = (now - base) & (mod - 1);

	/* Is the schedule about to wrap around? */
	if (unlikely(!empty && start < period)) {
		ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
				urb, stream->next_uframe, base, period, mod);
		status = -EFBIG;
		goto fail;
	}

	/* Is the next packet scheduled after the base time? */
	if (likely(!empty || start <= now2 + period)) {

		/* URB_ISO_ASAP: make sure that start >= next */
		if (unlikely(start < next &&
				(urb->transfer_flags & URB_ISO_ASAP)))
			goto do_ASAP;

		/* Otherwise use start, if it's not in the past */
		if (likely(start >= now2))
			goto use_start;

	/* Otherwise we got an underrun while the queue was empty */
	} else {
		if (urb->transfer_flags & URB_ISO_ASAP)
			goto do_ASAP;
		wrap = mod;
		now2 += mod;
	}

	/* How many uframes and packets do we need to skip? */
	skip = (now2 - start + period - 1) & -period;
	if (skip >= span) {		/* Entirely in the past? */
		ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
				urb, start + base, span - period, now2 + base,
				base);

		/* Try to keep the last TD intact for scanning later */
		skip = span - period;

		/* Will it come before the current scan position? */
		if (empty) {
			skip = span;	/* Skip the entire URB */
			status = 1;	/* and give it back immediately */
			iso_sched_free(stream, sched);
			sched = NULL;
		}
	}
	urb->error_count = skip / period;
	if (sched)
		sched->first_packet = urb->error_count;
	goto use_start;

 do_ASAP:
	/* Use the first slot after "next" */
	start = next + ((start - next) & (period - 1));

 use_start:
	/* Tried to schedule too far into the future? */
	if (unlikely(start + span - period >= mod + wrap)) {
		ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
				urb, start, span - period, mod + wrap);
		status = -EFBIG;
		goto fail;
	}

	start += base;
	stream->next_uframe = (start + skip) & (mod - 1);

	/* report high speed start in uframes; full speed, in frames */
	urb->start_frame = start & (mod - 1);
	if (!stream->highspeed)
		urb->start_frame >>= 3;
	return status;

 fail:
	iso_sched_free(stream, sched);
	urb->hcpriv = NULL;
	return status;
}

/*-------------------------------------------------------------------------*/

static inline void
itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
		struct ehci_itd *itd)
{
	int i;

	/* it's been recently zeroed */
	itd->hw_next = EHCI_LIST_END(ehci);
	itd->hw_bufp [0] = stream->buf0;
	itd->hw_bufp [1] = stream->buf1;
	itd->hw_bufp [2] = stream->buf2;

	for (i = 0; i < 8; i++)
		itd->index[i] = -1;

	/* All other fields are filled when scheduling */
}

static inline void
itd_patch(
	struct ehci_hcd		*ehci,
	struct ehci_itd		*itd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index,
	u16			uframe
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet [index];
	unsigned		pg = itd->pg;

	// BUG_ON (pg == 6 && uf->cross);

	uframe &= 0x07;
	itd->index [uframe] = index;

	itd->hw_transaction[uframe] = uf->transaction;
	itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
	itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
	itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely (uf->cross)) {
		u64	bufp = uf->bufp + 4096;

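		/* the buffer crosses a 4 KB page: steer the rest of the
		 * transfer at the next page pointer
		 */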
		itd->pg = ++pg;
		itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
		itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
	}
}

static inline void
itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
	union ehci_shadow	*prev = &ehci->pshadow[frame];
	__hc32			*hw_p = &ehci->periodic[frame];
	union ehci_shadow	here = *prev;
	__hc32			type = 0;

	/* skip any iso nodes which might belong to previous microframes */
	while (here.ptr) {
		type = Q_NEXT_TYPE(ehci, *hw_p);
		if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
			break;
		prev = periodic_next_shadow(ehci, prev, type);
		hw_p = shadow_next_periodic(ehci, &here, type);
		here = *prev;
	}

	itd->itd_next = here;
	itd->hw_next = *hw_p;
	prev->itd = itd;
	itd->frame = frame;
	wmb ();
	*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
}

/* fit urb's itds into the selected schedule slot; activate as needed */
static void itd_link_urb(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{
	int			packet;
	unsigned		next_uframe, uframe, frame;
	struct ehci_iso_sched	*iso_sched = urb->hcpriv;
	struct ehci_itd		*itd;

	next_uframe = stream->next_uframe & (mod - 1);

	if (unlikely (list_empty(&stream->td_list)))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;

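	/* AMD quirk: disable the optional PLL power-saving feature
	 * while isochronous transfers are in flight
	 */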
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_disable();
	}

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill iTDs uframe by uframe */
	for (packet = iso_sched->first_packet, itd = NULL;
			packet < urb->number_of_packets;) {
		if (itd == NULL) {
			/* ASSERT:  we have all necessary itds */
			// BUG_ON (list_empty (&iso_sched->td_list));

			/* ASSERT:  no itds for this endpoint in this uframe */

			itd = list_entry (iso_sched->td_list.next,
					struct ehci_itd, itd_list);
			list_move_tail (&itd->itd_list, &stream->td_list);
			itd->stream = stream;
			itd->urb = urb;
			itd_init (ehci, stream, itd);
		}

		uframe = next_uframe & 0x07;
		frame = next_uframe >> 3;

		itd_patch(ehci, itd, iso_sched, packet, uframe);

		next_uframe += stream->uperiod;
		next_uframe &= mod - 1;
		packet++;

		/* link completed itds into the schedule */
		if (((next_uframe >> 3) != frame)
				|| packet == urb->number_of_packets) {
			itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
			itd = NULL;
		}
	}
	stream->next_uframe = next_uframe;

	/* don't need that schedule data any more */
	iso_sched_free (stream, iso_sched);
	urb->hcpriv = stream;

	++ehci->isoc_count;
	enable_periodic(ehci);
}

#define	ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)

/* Process and recycle a completed ITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
{
	struct urb				*urb = itd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;
	unsigned				uframe;
	int					urb_index = -1;
	struct ehci_iso_stream			*stream = itd->stream;
	struct usb_device			*dev;
	bool					retval = false;

	/* for each uframe with a packet */
	for (uframe = 0; uframe < 8; uframe++) {
		if (likely (itd->index[uframe] == -1))
			continue;
		urb_index = itd->index[uframe];
		desc = &urb->iso_frame_desc [urb_index];

		t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]);
		itd->hw_transaction [uframe] = 0;

		/* report transfer status */
		if (unlikely (t & ISO_ERRS)) {
			urb->error_count++;
			if (t & EHCI_ISOC_BUF_ERR)
				desc->status = usb_pipein (urb->pipe)
					? -ENOSR  /* hc couldn't read */
					: -ECOMM; /* hc couldn't write */
			else if (t & EHCI_ISOC_BABBLE)
				desc->status = -EOVERFLOW;
			else /* (t & EHCI_ISOC_XACTERR) */
				desc->status = -EPROTO;

			/* HC need not update length with this error */
			if (!(t & EHCI_ISOC_BABBLE)) {
				desc->actual_length = EHCI_ITD_LENGTH(t);
				urb->actual_length += desc->actual_length;
			}
		} else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
			desc->status = 0;
			desc->actual_length = EHCI_ITD_LENGTH(t);
			urb->actual_length += desc->actual_length;
		} else {
			/* URB was too late */
			urb->error_count++;
		}
	}

	/* handle completion now? */
	if (likely ((urb_index + 1) != urb->number_of_packets))
		goto done;

	/* ASSERT: it's really the last itd for this urb
	list_for_each_entry (itd, &stream->td_list, itd_list)
		BUG_ON (itd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	dev = urb->dev;
	ehci_urb_done(ehci, urb, 0);
	retval = true;
	urb = NULL;

	--ehci->isoc_count;
	disable_periodic(ehci);

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_enable();
	}

1917	if (unlikely(list_is_singular(&stream->td_list)))
1918		ehci_to_hcd(ehci)->self.bandwidth_allocated
1919				-= stream->bandwidth;
1920
1921done:
1922	itd->urb = NULL;
1923
1924	/* Add to the end of the free list for later reuse */
1925	list_move_tail(&itd->itd_list, &stream->free_list);
1926
1927	/* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
1928	if (list_empty(&stream->td_list)) {
1929		list_splice_tail_init(&stream->free_list,
1930				&ehci->cached_itd_list);
1931		start_free_itds(ehci);
1932	}
1933
1934	return retval;
1935}

/*-------------------------------------------------------------------------*/

static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
	gfp_t mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	/* Get iso_stream head */
	stream = iso_stream_find (ehci, urb);
	if (unlikely (stream == NULL)) {
		ehci_dbg (ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (unlikely(urb->interval != stream->uperiod)) {
		ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
			stream->uperiod, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length,
		urb->number_of_packets, urb->interval,
		stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction (stream, ehci, urb, mem_flags);
	if (unlikely (status < 0)) {
		ehci_dbg (ehci, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(ehci, urb, stream);
	if (likely(status == 0)) {
		itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
	} else if (status > 0) {
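		/*
		 * A positive return from iso_stream_schedule() means the
		 * whole URB was too late for the schedule; give it back
		 * right away instead of linking it.
		 */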
		status = 0;
		ehci_urb_done(ehci, urb, 0);
	} else {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	}
 done_not_linked:
	spin_unlock_irqrestore (&ehci->lock, flags);
 done:
	return status;
}

/*-------------------------------------------------------------------------*/

/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through the
 * TTs in USB 2.0 hubs.  These need microframe scheduling.
 */
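
/*
 * Background, for orientation:  a full-speed frame carries at most
 * 1023 bytes of iso payload, and the TT forwards OUT data downstream
 * in chunks of up to 188 bytes per microframe (12 Mb/s over 125 us),
 * which is why the start-split accounting below works in 188-byte
 * units.
 */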

static inline void
sitd_sched_init(
	struct ehci_hcd		*ehci,
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many frames are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->ps.period;

	/* figure out per-frame sitd fields that we'll need later
	 * when we fit new sitds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet	*packet = &iso_sched->packet [i];
		unsigned		length;
		dma_addr_t		buf;
		u32			trans;

		length = urb->iso_frame_desc [i].length & 0x03ff;
		buf = dma + urb->iso_frame_desc [i].offset;

		trans = SITD_STS_ACTIVE;
		if (((i + 1) == urb->number_of_packets)
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= SITD_IOC;
		trans |= length << 16;
		packet->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a td */
		packet->bufp = buf;
		packet->buf1 = (buf + length) & ~0x0fff;
		if (packet->buf1 != (buf & ~(u64)0x0fff))
			packet->cross = 1;

		/* OUT uses multiple start-splits */
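		/* (illustration:  a 400-byte packet needs
		 * DIV_ROUND_UP(400, 188) == 3 start-splits, so "length"
		 * below becomes 3 and the 1 << 3 bit switches the
		 * transaction position from "all" to "begin") */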
		if (stream->bEndpointAddress & USB_DIR_IN)
			continue;
		length = (length + 187) / 188;
		if (length > 1) /* BEGIN vs ALL */
			length |= 1 << 3;
		packet->buf1 |= length;
	}
}

static int
sitd_urb_transaction (
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			mem_flags
)
{
	struct ehci_sitd	*sitd;
	dma_addr_t		sitd_dma;
	int			i;
	struct ehci_iso_sched	*iso_sched;
	unsigned long		flags;

	iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
	if (iso_sched == NULL)
		return -ENOMEM;

	sitd_sched_init(ehci, iso_sched, stream, urb);

	/* allocate/init sITDs */
	spin_lock_irqsave (&ehci->lock, flags);
	for (i = 0; i < urb->number_of_packets; i++) {

		/* NOTE:  for now, we don't try to handle wraparound cases
		 * for IN (using sitd->hw_backpointer, like a FSTN), which
		 * means we never need two sitds for full speed packets.
		 */

		/*
		 * Use siTDs from the free list, but not siTDs that may
		 * still be in use by the hardware.
		 */
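		/* (a siTD whose ->frame still matches ehci->now_frame may
		 * be cached by the controller, so jump to the allocation
		 * path and get a fresh one instead) */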
		if (likely(!list_empty(&stream->free_list))) {
			sitd = list_first_entry(&stream->free_list,
					 struct ehci_sitd, sitd_list);
			if (sitd->frame == ehci->now_frame)
				goto alloc_sitd;
			list_del (&sitd->sitd_list);
			sitd_dma = sitd->sitd_dma;
		} else {
 alloc_sitd:
			spin_unlock_irqrestore (&ehci->lock, flags);
			sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
					&sitd_dma);
			spin_lock_irqsave (&ehci->lock, flags);
			if (!sitd) {
				iso_sched_free(stream, iso_sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset (sitd, 0, sizeof *sitd);
		sitd->sitd_dma = sitd_dma;
		sitd->frame = NO_FRAME;
		list_add (&sitd->sitd_list, &iso_sched->td_list);
	}

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = iso_sched;
	urb->error_count = 0;

	spin_unlock_irqrestore (&ehci->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static inline void
sitd_patch(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	struct ehci_sitd	*sitd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet [index];
	u64			bufp = uf->bufp;

	sitd->hw_next = EHCI_LIST_END(ehci);
	sitd->hw_fullspeed_ep = stream->address;
	sitd->hw_uframe = stream->splits;
	sitd->hw_results = uf->transaction;
	sitd->hw_backpointer = EHCI_LIST_END(ehci);

	sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
	sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);

	sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
	if (uf->cross)
		bufp += 4096;
	sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
	sitd->index = index;
}

static inline void
sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
	sitd->sitd_next = ehci->pshadow [frame];
	sitd->hw_next = ehci->periodic [frame];
	ehci->pshadow [frame].sitd = sitd;
	sitd->frame = frame;
	wmb ();
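	/*
	 * The link written below carries the type tag (Q_TYPE_SITD) in
	 * its low bits; storing it last, after the wmb(), publishes the
	 * fully initialized siTD to the controller in one shot.
	 */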
	ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
}

/* fit urb's sitds into the selected schedule slot; activate as needed */
static void sitd_link_urb(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{
	int			packet;
	unsigned		next_uframe;
	struct ehci_iso_sched	*sched = urb->hcpriv;
	struct ehci_sitd	*sitd;

	next_uframe = stream->next_uframe;

	if (list_empty(&stream->td_list))
		/* usbfs ignores TT bandwidth */
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;

	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_disable();
	}

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill sITDs frame by frame */
	for (packet = sched->first_packet, sitd = NULL;
			packet < urb->number_of_packets;
			packet++) {

		/* ASSERT:  we have all necessary sitds */
		BUG_ON (list_empty (&sched->td_list));

		/* ASSERT:  no itds for this endpoint in this frame */

		sitd = list_entry (sched->td_list.next,
				struct ehci_sitd, sitd_list);
		list_move_tail (&sitd->sitd_list, &stream->td_list);
		sitd->stream = stream;
		sitd->urb = urb;

		sitd_patch(ehci, stream, sitd, sched, packet);
		sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
				sitd);

		next_uframe += stream->uperiod;
	}
	stream->next_uframe = next_uframe & (mod - 1);

	/* don't need that schedule data any more */
	iso_sched_free (stream, sched);
	urb->hcpriv = stream;

	++ehci->isoc_count;
	enable_periodic(ehci);
}

/*-------------------------------------------------------------------------*/

#define	SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
				| SITD_STS_XACT | SITD_STS_MMF)

/* Process and recycle a completed SITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
	struct urb				*urb = sitd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;
	int					urb_index = -1;
	struct ehci_iso_stream			*stream = sitd->stream;
	bool					retval = false;

	urb_index = sitd->index;
	desc = &urb->iso_frame_desc [urb_index];
	t = hc32_to_cpup(ehci, &sitd->hw_results);

	/* report transfer status */
	if (unlikely(t & SITD_ERRS)) {
		urb->error_count++;
		if (t & SITD_STS_DBE)
			desc->status = usb_pipein (urb->pipe)
				? -ENOSR  /* hc couldn't read */
				: -ECOMM; /* hc couldn't write */
		else if (t & SITD_STS_BABBLE)
			desc->status = -EOVERFLOW;
		else /* XACT, MMF, etc */
			desc->status = -EPROTO;
	} else if (unlikely(t & SITD_STS_ACTIVE)) {
		/* URB was too late */
		urb->error_count++;
	} else {
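		/* the siTD's total-bytes field counts down as data moves,
		 * so the residue left in it is what didn't transfer */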
		desc->status = 0;
		desc->actual_length = desc->length - SITD_LENGTH(t);
		urb->actual_length += desc->actual_length;
	}

	/* handle completion now? */
	if ((urb_index + 1) != urb->number_of_packets)
		goto done;

	/* ASSERT: it's really the last sitd for this urb
	list_for_each_entry (sitd, &stream->td_list, sitd_list)
		BUG_ON (sitd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	ehci_urb_done(ehci, urb, 0);
	retval = true;
	urb = NULL;

	--ehci->isoc_count;
	disable_periodic(ehci);

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_enable();
	}

	if (list_is_singular(&stream->td_list))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;

done:
	sitd->urb = NULL;

	/* Add to the end of the free list for later reuse */
	list_move_tail(&sitd->sitd_list, &stream->free_list);

	/* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
	if (list_empty(&stream->td_list)) {
		list_splice_tail_init(&stream->free_list,
				&ehci->cached_sitd_list);
		start_free_itds(ehci);
	}

	return retval;
}

static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
	gfp_t mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	/* Get iso_stream head */
	stream = iso_stream_find (ehci, urb);
	if (stream == NULL) {
		ehci_dbg (ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (urb->interval != stream->ps.period) {
		ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
			stream->ps.period, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"submit %p dev%s ep%d%s-iso len %d\n",
		urb, urb->dev->devpath,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length);
#endif

	/* allocate SITDs */
	status = sitd_urb_transaction (stream, ehci, urb, mem_flags);
	if (status < 0) {
		ehci_dbg (ehci, "can't init sitds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(ehci, urb, stream);
	if (likely(status == 0)) {
		sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
	} else if (status > 0) {
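		/*
		 * As in itd_submit():  a positive return means the whole
		 * URB was too late for the schedule, so give it back
		 * right away instead of linking it.
		 */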
		status = 0;
		ehci_urb_done(ehci, urb, 0);
	} else {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	}
 done_not_linked:
	spin_unlock_irqrestore (&ehci->lock, flags);
 done:
	return status;
}

/*-------------------------------------------------------------------------*/

static void scan_isoc(struct ehci_hcd *ehci)
{
	unsigned	uf, now_frame, frame;
	unsigned	fmask = ehci->periodic_size - 1;
	bool		modified, live;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 */
	if (ehci->rh_state >= EHCI_RH_RUNNING) {
		uf = ehci_read_frame_index(ehci);
		now_frame = (uf >> 3) & fmask;
		live = true;
	} else {
		now_frame = (ehci->last_iso_frame - 1) & fmask;
		live = false;
	}
	ehci->now_frame = now_frame;

	frame = ehci->last_iso_frame;
	for (;;) {
		union ehci_shadow	q, *q_p;
		__hc32			type, *hw_p;

restart:
		/* scan each element in frame's queue for completions */
		q_p = &ehci->pshadow [frame];
		hw_p = &ehci->periodic [frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE(ehci, *hw_p);
		modified = false;

		while (q.ptr != NULL) {
			switch (hc32_to_cpu(ehci, type)) {
			case Q_TYPE_ITD:
				/* If this ITD is still active, leave it for
				 * later processing ... check the next entry.
				 * No need to check for activity unless the
				 * frame is current.
				 */
				if (frame == now_frame && live) {
					rmb();
					for (uf = 0; uf < 8; uf++) {
						if (q.itd->hw_transaction[uf] &
							    ITD_ACTIVE(ehci))
							break;
					}
					if (uf < 8) {
						q_p = &q.itd->itd_next;
						hw_p = &q.itd->hw_next;
						type = Q_NEXT_TYPE(ehci,
							q.itd->hw_next);
						q = *q_p;
						break;
					}
				}

				/* Take finished ITDs out of the schedule
				 * and process them:  recycle, maybe report
				 * URB completion.  HC won't cache the
				 * pointer for much longer, if at all.
				 */
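				/* (with use_dummy_qh set -- a workaround
				 * for controllers that prefetch past a
				 * T-terminated link -- an emptied slot
				 * points at an inactive dummy QH rather
				 * than carrying EHCI_LIST_END) */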
				*q_p = q.itd->itd_next;
				if (!ehci->use_dummy_qh ||
				    q.itd->hw_next != EHCI_LIST_END(ehci))
					*hw_p = q.itd->hw_next;
				else
					*hw_p = cpu_to_hc32(ehci,
							ehci->dummy->qh_dma);
				type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
				wmb();
				modified = itd_complete (ehci, q.itd);
				q = *q_p;
				break;
			case Q_TYPE_SITD:
				/* If this SITD is still active, leave it for
				 * later processing ... check the next entry.
				 * No need to check for activity unless the
				 * frame is current.
				 */
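				/* (the frame + 1 check matters because a
				 * split transaction's complete-splits can
				 * run into the frame after its start) */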
				if (((frame == now_frame) ||
				     (((frame + 1) & fmask) == now_frame))
				    && live
				    && (q.sitd->hw_results &
					SITD_ACTIVE(ehci))) {

					q_p = &q.sitd->sitd_next;
					hw_p = &q.sitd->hw_next;
					type = Q_NEXT_TYPE(ehci,
							q.sitd->hw_next);
					q = *q_p;
					break;
				}

				/* Take finished SITDs out of the schedule
				 * and process them:  recycle, maybe report
				 * URB completion.
				 */
				*q_p = q.sitd->sitd_next;
				if (!ehci->use_dummy_qh ||
				    q.sitd->hw_next != EHCI_LIST_END(ehci))
					*hw_p = q.sitd->hw_next;
				else
					*hw_p = cpu_to_hc32(ehci,
							ehci->dummy->qh_dma);
				type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
				wmb();
				modified = sitd_complete (ehci, q.sitd);
				q = *q_p;
				break;
			default:
				ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
					type, frame, q.ptr);
				// BUG ();
				/* FALL THROUGH */
			case Q_TYPE_QH:
			case Q_TYPE_FSTN:
				/* End of the iTDs and siTDs */
				q.ptr = NULL;
				break;
			}

			/* assume completion callbacks modify the queue */
			if (unlikely(modified && ehci->isoc_count > 0))
				goto restart;
		}

		/* Stop when we have reached the current frame */
		if (frame == now_frame)
			break;

		/* The last frame may still have active siTDs */
		ehci->last_iso_frame = frame;
		frame = (frame + 1) & fmask;
	}
}