This source file includes the following definitions:
- xhci_segment_alloc
- xhci_segment_free
- xhci_free_segments_for_ring
- xhci_link_segments
- xhci_link_rings
- xhci_insert_segment_mapping
- xhci_remove_segment_mapping
- xhci_update_stream_segment_mapping
- xhci_remove_stream_mapping
- xhci_update_stream_mapping
- xhci_ring_free
- xhci_initialize_ring_info
- xhci_alloc_segments_for_ring
- xhci_ring_alloc
- xhci_free_endpoint_ring
- xhci_ring_expansion
- xhci_alloc_container_ctx
- xhci_free_container_ctx
- xhci_get_input_control_ctx
- xhci_get_slot_ctx
- xhci_get_ep_ctx
- xhci_free_stream_ctx
- xhci_alloc_stream_ctx
- xhci_dma_to_transfer_ring
- xhci_stream_id_to_ring
- xhci_alloc_stream_info
- xhci_setup_streams_ep_input_ctx
- xhci_setup_no_streams_ep_input_ctx
- xhci_free_stream_info
- xhci_init_endpoint_timer
- xhci_free_tt_info
- xhci_alloc_tt_info
- xhci_free_virt_device
- xhci_free_virt_devices_depth_first
- xhci_alloc_virt_device
- xhci_copy_ep0_dequeue_into_input_ctx
- xhci_find_real_port_number
- xhci_setup_addressable_virt_dev
- xhci_parse_exponent_interval
- xhci_microframes_to_exponent
- xhci_parse_microframe_interval
- xhci_parse_frame_interval
- xhci_get_endpoint_interval
- xhci_get_endpoint_mult
- xhci_get_endpoint_max_burst
- xhci_get_endpoint_type
- xhci_get_max_esit_payload
- xhci_endpoint_init
- xhci_endpoint_zero
- xhci_clear_endpoint_bw_info
- xhci_update_bw_info
- xhci_endpoint_copy
- xhci_slot_copy
- scratchpad_alloc
- scratchpad_free
- xhci_alloc_command
- xhci_alloc_command_with_ctx
- xhci_urb_free_priv
- xhci_free_command
- xhci_alloc_erst
- xhci_free_erst
- xhci_mem_cleanup
- xhci_test_trb_in_td
- xhci_check_trb_in_td_math
- xhci_set_hc_event_deq
- xhci_add_in_port
- xhci_create_rhub_port_array
- xhci_setup_port_arrays
- xhci_mem_init
// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 *         Some code borrowed from the Linux EHCI driver.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
					       unsigned int cycle_state,
					       unsigned int max_packet,
					       gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	if (max_packet) {
		seg->bounce_buf = kzalloc_node(max_packet, flags,
					       dev_to_node(dev));
		if (!seg->bounce_buf) {
			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
			kfree(seg);
			return NULL;
		}
	}
	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
	}
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	kfree(seg->bounce_buf);
	kfree(seg);
}

static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
					struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;

		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
			       struct xhci_segment *next, enum xhci_ring_type type)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		/* Set chain bit for isoc rings on AMD 0.96 host */
		if (xhci_link_trb_quirk(xhci) ||
		    (type == TYPE_ISOC &&
		     (xhci->quirks & XHCI_AMD_0x96_HOST)))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}
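
/*
 * Illustrative sketch (not part of the driver): TRBs are 16 bytes, so the
 * DMA address of the i-th TRB in a segment follows directly from the segment
 * base set up in xhci_segment_alloc().  The helper name below is hypothetical
 * and only restates the arithmetic used by helpers such as
 * xhci_trb_virt_to_dma().
 */
static inline dma_addr_t example_trb_dma(struct xhci_segment *seg,
					 unsigned int i)
{
	return seg->dma + i * sizeof(union xhci_trb);	/* 16 bytes per TRB */
}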

/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
			    struct xhci_segment *first, struct xhci_segment *last,
			    unsigned int num_segs)
{
	struct xhci_segment *next;

	if (!ring || !first || !last)
		return;

	next = ring->enq_seg->next;
	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
	xhci_link_segments(xhci, last, next, ring->type);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184 static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
185 struct xhci_ring *ring,
186 struct xhci_segment *seg,
187 gfp_t mem_flags)
188 {
189 unsigned long key;
190 int ret;
191
192 key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
193
194 if (radix_tree_lookup(trb_address_map, key))
195 return 0;
196
197 ret = radix_tree_maybe_preload(mem_flags);
198 if (ret)
199 return ret;
200 ret = radix_tree_insert(trb_address_map,
201 key, ring);
202 radix_tree_preload_end();
203 return ret;
204 }

static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
					struct xhci_segment *seg)
{
	unsigned long key;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	if (radix_tree_lookup(trb_address_map, key))
		radix_tree_delete(trb_address_map, key);
}

static int xhci_update_stream_segment_mapping(
		struct radix_tree_root *trb_address_map,
		struct xhci_ring *ring,
		struct xhci_segment *first_seg,
		struct xhci_segment *last_seg,
		gfp_t mem_flags)
{
	struct xhci_segment *seg;
	struct xhci_segment *failed_seg;
	int ret;

	if (WARN_ON_ONCE(trb_address_map == NULL))
		return 0;

	seg = first_seg;
	do {
		ret = xhci_insert_segment_mapping(trb_address_map,
						  ring, seg, mem_flags);
		if (ret)
			goto remove_streams;
		if (seg == last_seg)
			return 0;
		seg = seg->next;
	} while (seg != first_seg);

	return 0;

remove_streams:
	failed_seg = seg;
	seg = first_seg;
	do {
		xhci_remove_segment_mapping(trb_address_map, seg);
		if (seg == failed_seg)
			return ret;
		seg = seg->next;
	} while (seg != first_seg);

	return ret;
}

static void xhci_remove_stream_mapping(struct xhci_ring *ring)
{
	struct xhci_segment *seg;

	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
		return;

	seg = ring->first_seg;
	do {
		xhci_remove_segment_mapping(ring->trb_address_map, seg);
		seg = seg->next;
	} while (seg != ring->first_seg);
}

static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
			ring->first_seg, ring->last_seg, mem_flags);
}
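
/*
 * Illustrative sketch (not part of the driver): given a TRB DMA address
 * reported in a transfer event, the owning stream ring is recovered with the
 * same key under which xhci_insert_segment_mapping() filed its segment.  The
 * helper name is hypothetical; the real lookup lives in
 * xhci_dma_to_transfer_ring() below.
 */
static inline struct xhci_ring *example_ring_for_trb(
		struct radix_tree_root *trb_address_map, dma_addr_t trb_dma)
{
	return radix_tree_lookup(trb_address_map,
			(unsigned long)(trb_dma >> TRB_SEGMENT_SHIFT));
}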

void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;

	trace_xhci_ring_free(ring);

	if (ring->first_seg) {
		if (ring->type == TYPE_STREAM)
			xhci_remove_stream_mapping(ring);
		xhci_free_segments_for_ring(xhci, ring->first_seg);
	}

	kfree(ring);
}

static void xhci_initialize_ring_info(struct xhci_ring *ring,
				      unsigned int cycle_state)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to the old ring.
	 */
	ring->cycle_state = cycle_state;

	/*
	 * Each segment has a link TRB, and leave an extra TRB for SW
	 * accounting purpose
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
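
/*
 * Worked example (assuming TRBS_PER_SEGMENT == 256 as in mainline): a freshly
 * initialized two-segment ring reports
 *	num_trbs_free = 2 * (256 - 1) - 1 = 509
 * One TRB per segment is consumed by the link TRB, and one more TRB is held
 * back so the enqueue pointer can never catch up with the dequeue pointer on
 * a full ring.
 */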

/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_segment *prev;

	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
		if (!next) {
			prev = *first;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			return -ENOMEM;
		}
		xhci_link_segments(xhci, prev, next, type);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, *first, type);
	*last = prev;

	return 0;
}

/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_ring *ring;
	int ret;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	ring->bounce_buf_len = max_packet;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
		return ring;

	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
			&ring->last_seg, num_segs, cycle_state, type,
			max_packet, flags);
	if (ret)
		goto fail;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring, cycle_state);
	trace_xhci_ring_alloc(ring);
	return ring;

fail:
	kfree(ring);
	return NULL;
}

void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
			     struct xhci_virt_device *virt_dev,
			     unsigned int ep_index)
{
	xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
	virt_dev->eps[ep_index].ring = NULL;
}

/*
 * Expand an existing ring.
 * Allocate a new ring which has same segment numbers and link the two rings.
 */
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
			unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment *first;
	struct xhci_segment *last;
	unsigned int num_segs;
	unsigned int num_segs_needed;
	int ret;

	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
				(TRBS_PER_SEGMENT - 1);

	/* Allocate number of segments we needed, or double the ring size */
	num_segs = ring->num_segs > num_segs_needed ?
			ring->num_segs : num_segs_needed;

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type,
			ring->bounce_buf_len, flags);
	if (ret)
		return -ENOMEM;

	if (ring->type == TYPE_STREAM)
		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
							 ring, first, last, flags);
	if (ret) {
		struct xhci_segment *next;

		do {
			next = first->next;
			xhci_segment_free(xhci, first);
			if (first == last)
				break;
			first = next;
		} while (true);
		return ret;
	}

	xhci_link_rings(xhci, ring, first, last, num_segs);
	trace_xhci_ring_expansion(ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
		       "ring expansion succeeded, now has %d segments",
		       ring->num_segs);

	return 0;
}
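
/*
 * Worked example (assuming TRBS_PER_SEGMENT == 256): a request for room for
 * 600 more TRBs on a two-segment ring needs
 *	num_segs_needed = DIV_ROUND_UP(600, 255) = 3
 * segments, so num_segs = max(2, 3) = 3 segments are allocated and the ring
 * grows from 2 to 5 segments; a request for only 100 TRBs would instead
 * double the ring (num_segs = max(2, 1) = 2).
 */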

struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
		return NULL;

	ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
	if (!ctx)
		return NULL;

	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(
		struct xhci_container_ctx *ctx)
{
	if (ctx->type != XHCI_CTX_TYPE_INPUT)
		return NULL;

	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
					struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
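
/*
 * Worked example (assuming 32-byte contexts, i.e. CTX_SIZE == 32): for
 * ep_index 0 (endpoint 0) in an input context the returned pointer is
 * ctx->bytes + (0 + 1 + 1) * 32 = ctx->bytes + 64, because the input control
 * context and the slot context precede the endpoint contexts.  In an output
 * (device) context only the slot context precedes, giving ctx->bytes + 32.
 */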

/***************** Streams structures manipulation *************************/

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(dev, size, stream_ctx, dma);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_free(xhci->small_streams_pool,
				     stream_ctx, dma);
	else
		return dma_pool_free(xhci->medium_streams_pool,
				     stream_ctx, dma);
}

/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as megabytes.
 */
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(dev, size, dma, mem_flags);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				      mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				      mem_flags, dma);
}

struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
					 address >> TRB_SEGMENT_SHIFT);
	return ep->ring;
}

struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id >= ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}
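
/*
 * Worked example (assuming 4KB segments, i.e. TRBS_PER_SEGMENT == 256 and
 * TRB_SEGMENT_SHIFT == 12): a TRB at DMA address 0x10c91230 hashes to key
 * 0x10c91230 >> 12 = 0x10c91, the same key under which every TRB in that
 * segment was inserted, so xhci_dma_to_transfer_ring() returns the stream
 * ring that owns the segment.
 */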

/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by
 * device drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use.  This is because the number of
 * stream context array entries must be a power of two.
 */
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams,
		unsigned int max_packet, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;
	int ret;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	xhci_dbg(xhci, "Allocating %u streams and %u stream context array entries.\n",
		 num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc_node(sizeof(*stream_info), mem_flags,
				   dev_to_node(dev));
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kcalloc_node(
			num_streams, sizeof(struct xhci_ring *), mem_flags,
			dev_to_node(dev));
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
	       sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/* Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
					mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		cur_ring->trb_address_map = &stream_info->trb_address_map;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
			 cur_stream, (unsigned long long) addr);

		ret = xhci_update_stream_mapping(cur_ring, mem_flags);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}

/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9, etc.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
		       "Setting number of stream ctx array entries to %u",
		       1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}

/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */
void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;

	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}

/* Frees all stream contexts associated with the endpoint.
 *
 * Caller should fix the endpoint context streams fields.
 */
void xhci_free_stream_info(struct xhci_hcd *xhci,
			   struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
	     cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}

/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
				     struct xhci_virt_ep *ep)
{
	timer_setup(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
		    0);
	ep->xhci = xhci;
}

static void xhci_free_tt_info(struct xhci_hcd *xhci,
			      struct xhci_virt_device *virt_dev,
			      int slot_id)
{
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	bool slot_found = false;

	/* If the device never made it past the Set Address stage,
	 * it may not have the real_port set correctly.
	 */
	if (virt_dev->real_port == 0 ||
	    virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* Multi-TT hubs will have more than one entry */
		if (tt_info->slot_id == slot_id) {
			slot_found = true;
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		} else if (slot_found) {
			break;
		}
	}
}

int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info *tt_info;
	unsigned int num_ports;
	int i, j;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++, tt_info++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc_node(sizeof(*tt_info), mem_flags,
				       dev_to_node(dev));
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);
		list_add(&tt_info->tt_list,
			 &xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i+1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}

/* All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * are manipulated during the operation.
 */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];

	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	trace_xhci_free_virt_device(dev);

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; i++) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/* Endpoints on the TT/root port lists should have been removed
		 * when usb_disable_device() was called for the device.
		 * We can't drop them anyway, because the udev might have gone
		 * away by this point, and we can't tell what speed it was.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u not removed from BW list!\n",
				  slot_id, i);
	}
	/* If this is a hub, free the TT(s) from the TT list */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	if (dev->udev && dev->udev->slot_id)
		dev->udev->slot_id = 0;
	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}

/*
 * Free a virt_device structure.
 * If the virt_device added a tt_info (a hub) and has children pointing to
 * that tt_info, then free the child first. Recursive.
 * We can't rely on udev at this point to find child-parent relationships.
 */
static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *vdev;
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	int i;

	vdev = xhci->devs[slot_id];
	if (!vdev)
		return;

	if (vdev->real_port == 0 ||
	    vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad vdev->real_port.\n");
		goto out;
	}

	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* is this a hub device that added a tt_info to the tts list */
		if (tt_info->slot_id == slot_id) {
			/* are any devices using this tt_info? */
			for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
				vdev = xhci->devs[i];
				if (vdev && (vdev->tt_info == tt_info))
					xhci_free_virt_devices_depth_first(
						xhci, i);
			}
		}
	}
out:
	/* we are now at a leaf device */
	xhci_debugfs_remove_slot(xhci, slot_id);
	xhci_free_virt_device(xhci, slot_id);
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	dev = kzalloc(sizeof(*dev), flags);
	if (!dev)
		return 0;

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
		 (unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
		 (unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
	if (!dev->eps[0].ring)
		goto fail;

	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
		 slot_id,
		 &xhci->dcbaa->dev_context_ptrs[slot_id],
		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	trace_xhci_alloc_virt_device(dev);

	xhci->devs[slot_id] = dev;

	return 1;
fail:

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);
	kfree(dev);

	return 0;
}

void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_ring *ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer.  This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}

/*
 * The xHCI roothub may have ports of differing speeds in any order in the port
 * status registers.
 *
 * The xHCI hardware wants to know the roothub port number that the USB device
 * is attached to (or the roothub port its ancestor hub is attached to).  All we
 * know is the index of that port under either the USB 2.0 or the USB 3.0
 * roothub, but that doesn't give us the real index into the HW port status
 * registers.  Call xhci_find_raw_port_number() to get the real index.
 */
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct usb_device *top_dev;
	struct usb_hcd *hcd;

	if (udev->speed >= USB_SPEED_SUPER)
		hcd = xhci->shared_hcd;
	else
		hcd = xhci->main_hcd;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;

	return xhci_find_raw_port_number(hcd, top_dev->portnum);
}

/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num;
	u32 max_packets;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
			  udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER_PLUS:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		max_packets = MAX_PACKET(64);
		break;
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		max_packets = MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		max_packets = MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		return -EINVAL;
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);

	/* Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port (or a
	 * descendant of one), it counts as a primary bandwidth domain, not a
	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
	 * won't be created for the HS root hub or the root ports.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT. */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
			    (udev->tt->multi &&
			     tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
						(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
					 max_packets);

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
				   dev->eps[0].ring->cycle_state);

	trace_xhci_setup_addressable_virt_device(dev);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}

/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d %sframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes. We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}
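
/*
 * Worked example: a SuperSpeed interrupt endpoint with bInterval = 5 gives
 * interval = 5 - 1 = 4, i.e. a service interval of 2^4 = 16 microframes
 * (2 ms).  A full-speed isoc endpoint with bInterval = 4 gives
 * 4 - 1 + 3 = 6, i.e. 2^6 microframes = 8 frames, matching the USB 2.0
 * definition of 2^(bInterval-1) frames.
 */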

/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
		struct usb_host_endpoint *ep, unsigned int desc_interval,
		unsigned int min_exponent, unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	interval = clamp_val(interval, min_exponent, max_exponent);
	if ((1 << interval) != desc_interval)
		dev_dbg(&udev->dev,
			"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
			ep->desc.bEndpointAddress,
			1 << interval,
			desc_interval);

	return interval;
}

static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (ep->desc.bInterval == 0)
		return 0;
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval, 0, 15);
}

static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval * 8, 3, 10);
}

/*
 * Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = xhci_parse_microframe_interval(udev, ep);
			break;
		}
		/* Fall through - SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */
		/* fall through */

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {

			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return interval;
}

/*
 * The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed < USB_SPEED_SUPER ||
	    !usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}

static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	/* Super speed and Plus have max burst in ep companion desc */
	if (udev->speed >= USB_SPEED_SUPER)
		return ep->ss_ep_comp.bMaxBurst;

	if (udev->speed == USB_SPEED_HIGH &&
	    (usb_endpoint_xfer_isoc(&ep->desc) ||
	     usb_endpoint_xfer_int(&ep->desc)))
		return usb_endpoint_maxp_mult(&ep->desc) - 1;

	return 0;
}

static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
{
	int in;

	in = usb_endpoint_dir_in(&ep->desc);

	switch (usb_endpoint_type(&ep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		return CTRL_EP;
	case USB_ENDPOINT_XFER_BULK:
		return in ? BULK_IN_EP : BULK_OUT_EP;
	case USB_ENDPOINT_XFER_ISOC:
		return in ? ISOC_IN_EP : ISOC_OUT_EP;
	case USB_ENDPOINT_XFER_INT:
		return in ? INT_IN_EP : INT_OUT_EP;
	}
	return 0;
}

/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static u32 xhci_get_max_esit_payload(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
	    usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	/* SuperSpeedPlus Isoc ep sending over 48k per esit */
	if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
	    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
		return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
	/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
	else if (udev->speed >= USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = usb_endpoint_maxp(&ep->desc);
	max_burst = usb_endpoint_maxp_mult(&ep->desc);

	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * max_burst;
}
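
/*
 * Worked example: a high-speed isoc endpoint with a 1024-byte max packet and
 * two additional transaction opportunities per microframe
 * (usb_endpoint_maxp_mult() == 3) has a max ESIT payload of
 * 1024 * 3 = 3072 bytes per service interval.
 */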

/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to do that.
 */
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	enum xhci_ring_type ring_type;
	u32 max_esit_payload;
	u32 endpoint_type;
	unsigned int max_burst;
	unsigned int interval;
	unsigned int mult;
	unsigned int avg_trb_len;
	unsigned int err_count = 0;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	endpoint_type = xhci_get_endpoint_type(ep);
	if (!endpoint_type)
		return -EINVAL;

	ring_type = usb_endpoint_type(&ep->desc);

	/*
	 * Get values to fill the endpoint context, mostly from the endpoint
	 * descriptor.  The average TRB buffer length for bulk endpoints is
	 * unclear as we have no clear guidance on its value, so default it
	 * to the max ESIT payload.
	 */
	max_esit_payload = xhci_get_max_esit_payload(udev, ep);
	interval = xhci_get_endpoint_interval(udev, ep);

	/* Periodic endpoint bInterval limit quirk */
	if (usb_endpoint_xfer_int(&ep->desc) ||
	    usb_endpoint_xfer_isoc(&ep->desc)) {
		if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
		    udev->speed >= USB_SPEED_HIGH &&
		    interval >= 7) {
			interval = 6;
		}
	}

	mult = xhci_get_endpoint_mult(udev, ep);
	max_packet = usb_endpoint_maxp(&ep->desc);
	max_burst = xhci_get_endpoint_max_burst(udev, ep);
	avg_trb_len = max_esit_payload;

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc, set CErr = 3 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		err_count = 3;
	/* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
	if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (udev->speed == USB_SPEED_HIGH)
			max_packet = 512;
		if (udev->speed == USB_SPEED_FULL) {
			max_packet = rounddown_pow_of_two(max_packet);
			max_packet = clamp_val(max_packet, 8, 64);
		}
	}
	/* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
		avg_trb_len = 8;
	/* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */
	if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
		mult = 0;

	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring)
		return -ENOMEM;

	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;

	/* Fill the endpoint context */
	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
				      EP_INTERVAL(interval) |
				      EP_MULT(mult));
	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
				       MAX_PACKET(max_packet) |
				       MAX_BURST(max_burst) |
				       ERROR_COUNT(err_count));
	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
				  ep_ring->cycle_state);

	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
				      EP_AVG_TRB_LENGTH(avg_trb_len));

	return 0;
}

void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds or fails.
	 */
}

void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
	bw_info->ep_interval = 0;
	bw_info->mult = 0;
	bw_info->num_packets = 0;
	bw_info->max_packet_size = 0;
	bw_info->type = 0;
	bw_info->max_esit_payload = 0;
}

void xhci_update_bw_info(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		struct xhci_virt_device *virt_dev)
{
	struct xhci_bw_info *bw_info;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_type;
	int i;

	for (i = 1; i < 31; i++) {
		bw_info = &virt_dev->eps[i].bw_info;

		/* We can't tell what endpoint type is being dropped, but
		 * unconditionally clearing the bandwidth info for non-periodic
		 * endpoints should be harmless because the info will never be
		 * set in the first place.
		 */
		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
			/* Dropped endpoint */
			xhci_clear_endpoint_bw_info(bw_info);
			continue;
		}

		if (EP_IS_ADDED(ctrl_ctx, i)) {
			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));

			/* Ignore non-periodic endpoints */
			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
			    ep_type != ISOC_IN_EP &&
			    ep_type != INT_IN_EP)
				continue;

			/* Added or changed endpoint */
			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
					le32_to_cpu(ep_ctx->ep_info));
			/* Number of packets and mult are zero-based in the
			 * input context, but we want one-based for the
			 * interval table.
			 */
			bw_info->mult = CTX_TO_EP_MULT(
					le32_to_cpu(ep_ctx->ep_info)) + 1;
			bw_info->num_packets = CTX_TO_MAX_BURST(
					le32_to_cpu(ep_ctx->ep_info2)) + 1;
			bw_info->max_packet_size = MAX_PACKET_DECODED(
					le32_to_cpu(ep_ctx->ep_info2));
			bw_info->type = ep_type;
			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
					le32_to_cpu(ep_ctx->tx_info));
		}
	}
}

/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
	if (xhci->quirks & XHCI_MTK_HOST) {
		in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
		in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
	}
}

/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.  Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Allocating %d scratchpad buffers", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags,
					dev_to_node(dev));
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
					num_sp * sizeof(u64),
					&xhci->scratchpad->sp_dma, flags);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *),
					flags, dev_to_node(dev));
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
					       flags);
		if (!buf)
			goto fail_sp4;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
	}

	return 0;

fail_sp4:
	for (i = i - 1; i >= 0; i--) {
		dma_free_coherent(dev, xhci->page_size,
				  xhci->scratchpad->sp_buffers[i],
				  xhci->scratchpad->sp_array[i]);
	}

	kfree(xhci->scratchpad->sp_buffers);

fail_sp3:
	dma_free_coherent(dev, num_sp * sizeof(u64),
			  xhci->scratchpad->sp_array,
			  xhci->scratchpad->sp_dma);

fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

fail_sp:
	return -ENOMEM;
}
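
/*
 * Illustrative note: dev_context_ptrs[0] in the DCBAA is reserved for the
 * scratchpad array pointer whenever the controller asks for scratchpad
 * buffers (device slot IDs start at 1), so with num_sp == 4 the layout is
 * dcbaa[0] -> sp_array, and sp_array[0..3] each hold the DMA address of one
 * page-sized scratchpad buffer that only the hardware touches.
 */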

static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		dma_free_coherent(dev, xhci->page_size,
				  xhci->scratchpad->sp_buffers[i],
				  xhci->scratchpad->sp_array[i]);
	}
	kfree(xhci->scratchpad->sp_buffers);
	dma_free_coherent(dev, num_sp * sizeof(u64),
			  xhci->scratchpad->sp_array,
			  xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}

struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_completion, gfp_t mem_flags)
{
	struct xhci_command *command;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev));
	if (!command)
		return NULL;

	if (allocate_completion) {
		command->completion =
			kzalloc_node(sizeof(struct completion), mem_flags,
				     dev_to_node(dev));
		if (!command->completion) {
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}

struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
		bool allocate_completion, gfp_t mem_flags)
{
	struct xhci_command *command;

	command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
	if (!command)
		return NULL;

	command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
						   mem_flags);
	if (!command->in_ctx) {
		kfree(command->completion);
		kfree(command);
		return NULL;
	}
	return command;
}

void xhci_urb_free_priv(struct urb_priv *urb_priv)
{
	kfree(urb_priv);
}

void xhci_free_command(struct xhci_hcd *xhci,
		       struct xhci_command *command)
{
	xhci_free_container_ctx(xhci, command->in_ctx);
	kfree(command->completion);
	kfree(command);
}

int xhci_alloc_erst(struct xhci_hcd *xhci,
		    struct xhci_ring *evt_ring,
		    struct xhci_erst *erst,
		    gfp_t flags)
{
	size_t size;
	unsigned int val;
	struct xhci_segment *seg;
	struct xhci_erst_entry *entry;

	size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
	erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
					   size, &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = evt_ring->num_segs;

	seg = evt_ring->first_seg;
	for (val = 0; val < evt_ring->num_segs; val++) {
		entry = &erst->entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}

	return 0;
}
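
/*
 * Illustrative sketch (not part of the driver): each ERST entry simply
 * publishes one event ring segment (base address plus size in TRBs) to the
 * controller.  A hypothetical debug helper could dump the table:
 */
static inline void example_dump_erst(struct xhci_erst *erst)
{
	unsigned int i;

	for (i = 0; i < erst->num_entries; i++)
		pr_debug("ERST[%u]: seg 0x%llx, %u TRBs\n", i,
			 le64_to_cpu(erst->entries[i].seg_addr),
			 le32_to_cpu(erst->entries[i].seg_size));
}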

void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
	size_t size;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
	if (erst->entries)
		dma_free_coherent(dev, size,
				  erst->entries,
				  erst->erst_dma_addr);
	erst->entries = NULL;
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int i, j, num_ports;

	cancel_delayed_work_sync(&xhci->cmd_timer);

	xhci_free_erst(xhci, &xhci->erst);

	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");

	if (xhci->lpm_command)
		xhci_free_command(xhci, xhci->lpm_command);
	xhci->lpm_command = NULL;
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
	xhci_cleanup_command_queue(xhci);

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	for (i = 0; i < num_ports && xhci->rh_bw; i++) {
		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
			struct list_head *ep = &bwt->interval_bw[j].endpoints;
			while (!list_empty(ep))
				list_del_init(ep->next);
		}
	}

	for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
		xhci_free_virt_devices_depth_first(xhci, i);

	dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");

	dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");

	dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Freed small stream array pool");

	dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Freed medium stream array pool");

	if (xhci->dcbaa)
		dma_free_coherent(dev, sizeof(*xhci->dcbaa),
				  xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);

	if (!xhci->rh_bw)
		goto no_bw;

	for (i = 0; i < num_ports; i++) {
		struct xhci_tt_bw_info *tt, *n;
		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
			list_del(&tt->tt_list);
			kfree(tt);
		}
	}

no_bw:
	xhci->cmd_ring_reserved_trbs = 0;
	xhci->usb2_rhub.num_ports = 0;
	xhci->usb3_rhub.num_ports = 0;
	xhci->num_active_eps = 0;
	kfree(xhci->usb2_rhub.ports);
	kfree(xhci->usb3_rhub.ports);
	kfree(xhci->hw_ports);
	kfree(xhci->rh_bw);
	kfree(xhci->ext_caps);
	for (i = 0; i < xhci->num_port_caps; i++)
		kfree(xhci->port_caps[i].psi);
	kfree(xhci->port_caps);
	xhci->num_port_caps = 0;

	xhci->usb2_rhub.ports = NULL;
	xhci->usb3_rhub.ports = NULL;
	xhci->hw_ports = NULL;
	xhci->rh_bw = NULL;
	xhci->ext_caps = NULL;

	xhci->page_size = 0;
	xhci->page_shift = 0;
	xhci->usb2_rhub.bus_state.bus_suspended = 0;
	xhci->usb3_rhub.bus_state.bus_suspended = 0;
}

static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
			  test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and input DMA 0x%llx\n",
			  input_seg,
			  (unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), ending TRB %p (0x%llx DMA)\n",
			  start_trb, start_dma,
			  end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
			  result_seg, seg);
		trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
			  true);
		return -1;
	}
	return 0;
}

static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
{
	struct {
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} simple_test_vector [] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment	*input_seg;
		union xhci_trb		*start_trb;
		union xhci_trb		*end_trb;
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} complex_test_vector [] = {
		/* Test feeding a valid DMA address from a different ring */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = xhci->event_ring->first_seg->trbs,
		  .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
		  .input_dma = xhci->cmd_ring->first_seg->dma,
		  .result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = xhci->event_ring->first_seg->trbs,
		  .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
		  .input_dma = xhci->cmd_ring->first_seg->dma,
		  .result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = xhci->cmd_ring->first_seg->trbs,
		  .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
		  .input_dma = xhci->cmd_ring->first_seg->dma,
		  .result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = &xhci->event_ring->first_seg->trbs[0],
		  .end_trb = &xhci->event_ring->first_seg->trbs[3],
		  .input_dma = xhci->event_ring->first_seg->dma + 4*16,
		  .result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = &xhci->event_ring->first_seg->trbs[3],
		  .end_trb = &xhci->event_ring->first_seg->trbs[6],
		  .input_dma = xhci->event_ring->first_seg->dma + 2*16,
		  .result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
		  .end_trb = &xhci->event_ring->first_seg->trbs[1],
		  .input_dma = xhci->event_ring->first_seg->dma + 2*16,
		  .result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
		  .end_trb = &xhci->event_ring->first_seg->trbs[1],
		  .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
		  .result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
		  .end_trb = &xhci->event_ring->first_seg->trbs[1],
		  .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
		  .result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}

	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}

static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Write event ring dequeue pointer, preserving EHB bit");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
		      &xhci->ir_set->erst_dequeue);
}
2120
2121 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2122 __le32 __iomem *addr, int max_caps)
2123 {
2124 u32 temp, port_offset, port_count;
2125 int i;
2126 u8 major_revision, minor_revision;
2127 struct xhci_hub *rhub;
2128 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2129 struct xhci_port_cap *port_cap;
2130
2131 temp = readl(addr);
2132 major_revision = XHCI_EXT_PORT_MAJOR(temp);
2133 minor_revision = XHCI_EXT_PORT_MINOR(temp);
2134
2135 if (major_revision == 0x03) {
2136 rhub = &xhci->usb3_rhub;
2137 } else if (major_revision <= 0x02) {
2138 rhub = &xhci->usb2_rhub;
2139 } else {
2140 xhci_warn(xhci, "Ignoring unknown port speed, "
2141 "Ext Cap %p, revision = 0x%x\n",
2142 addr, major_revision);
2143
2144 return;
2145 }
2146 rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
2147
2148 if (rhub->min_rev < minor_revision)
2149 rhub->min_rev = minor_revision;
2150
2151
2152 temp = readl(addr + 2);
2153 port_offset = XHCI_EXT_PORT_OFF(temp);
2154 port_count = XHCI_EXT_PORT_COUNT(temp);
2155 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2156 "Ext Cap %p, port offset = %u, "
2157 "count = %u, revision = 0x%x",
2158 addr, port_offset, port_count, major_revision);
2159
	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
		/* "Valid values are 1 to MaxPorts"; ignore bogus capabilities */
		return;
2163
2164 port_cap = &xhci->port_caps[xhci->num_port_caps++];
2165 if (xhci->num_port_caps > max_caps)
2166 return;
2167
2168 port_cap->maj_rev = major_revision;
2169 port_cap->min_rev = minor_revision;
2170 port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
2171
2172 if (port_cap->psi_count) {
2173 port_cap->psi = kcalloc_node(port_cap->psi_count,
2174 sizeof(*port_cap->psi),
2175 GFP_KERNEL, dev_to_node(dev));
2176 if (!port_cap->psi)
2177 port_cap->psi_count = 0;
2178
2179 port_cap->psi_uid_count++;
2180 for (i = 0; i < port_cap->psi_count; i++) {
2181 port_cap->psi[i] = readl(addr + 4 + i);

			/*
			 * Count unique ID values; two consecutive entries can
			 * have the same ID if the link is asymmetric.
			 */
2186 if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
2187 XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
2188 port_cap->psi_uid_count++;
2189
2190 xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
2191 XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
2192 XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
2193 XHCI_EXT_PORT_PLT(port_cap->psi[i]),
2194 XHCI_EXT_PORT_PFD(port_cap->psi[i]),
2195 XHCI_EXT_PORT_LP(port_cap->psi[i]),
2196 XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
2197 }
2198 }

	/* cache usb2 port capabilities */
	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
2201 xhci->ext_caps[xhci->num_ext_caps++] = temp;
2202
2203 if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) &&
2204 (temp & XHCI_HLC)) {
2205 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2206 "xHCI 1.0: support USB2 hardware lpm");
2207 xhci->hw_lpm_support = 1;
2208 }
2209
2210 port_offset--;
2211 for (i = port_offset; i < (port_offset + port_count); i++) {
2212 struct xhci_port *hw_port = &xhci->hw_ports[i];
2213
2214 if (hw_port->rhub) {
2215 xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
2216 " port %u\n", addr, i);
2217 xhci_warn(xhci, "Port was marked as USB %u, "
2218 "duplicated as USB %u\n",
2219 hw_port->rhub->maj_rev, major_revision);

			/* Only adjust the roothub port counts if we haven't
			 * found a similar duplicate.
			 */
2223 if (hw_port->rhub != rhub &&
2224 hw_port->hcd_portnum != DUPLICATE_ENTRY) {
2225 hw_port->rhub->num_ports--;
2226 hw_port->hcd_portnum = DUPLICATE_ENTRY;
2227 }
2228 continue;
2229 }
2230 hw_port->rhub = rhub;
2231 hw_port->port_cap = port_cap;
2232 rhub->num_ports++;
2233 }
2234
2235 }
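/*
 * Illustrative decode (hypothetical register values): a capability whose
 * first dword reads 0x03001002 is decoded as major revision 0x03, minor
 * revision 0x00 (a USB 3.0 protocol capability), and a third dword of
 * 0x00000402 gives port_offset = 2 and port_count = 4, so root hub
 * ports 2..5 are claimed for xhci->usb3_rhub by the loop above.
 */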
2236
2237 static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
2238 struct xhci_hub *rhub, gfp_t flags)
2239 {
2240 int port_index = 0;
2241 int i;
2242 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2243
2244 if (!rhub->num_ports)
2245 return;
	rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
			flags, dev_to_node(dev));
	if (!rhub->ports)
		return;
2248 for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
2249 if (xhci->hw_ports[i].rhub != rhub ||
2250 xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
2251 continue;
2252 xhci->hw_ports[i].hcd_portnum = port_index;
2253 rhub->ports[port_index] = &xhci->hw_ports[i];
2254 port_index++;
2255 if (port_index == rhub->num_ports)
2256 break;
2257 }
2258 }
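/*
 * Resulting layout, sketched for a hypothetical 4-port controller whose
 * first two hw_ports were claimed by a USB2 capability and last two by
 * a USB3 capability:
 *
 *   usb2_rhub.ports[0] = &hw_ports[0];   hw_ports[0].hcd_portnum = 0;
 *   usb2_rhub.ports[1] = &hw_ports[1];   hw_ports[1].hcd_portnum = 1;
 *   usb3_rhub.ports[0] = &hw_ports[2];   hw_ports[2].hcd_portnum = 0;
 *   usb3_rhub.ports[1] = &hw_ports[3];   hw_ports[3].hcd_portnum = 1;
 *
 * Each root hub sees a dense zero-based port array, while hw_portnum
 * keeps the controller-wide index.
 */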
2259
/*
 * Scan the Extended Capabilities for the "Supported Protocol Capabilities"
 * that tell us which speeds each root hub port supports.  The port speed
 * bits in PORTSC can't be trusted until a device is connected, so this is
 * how the xhci->usb2_rhub and xhci->usb3_rhub port counts get populated.
 */
2267 static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2268 {
2269 void __iomem *base;
2270 u32 offset;
2271 unsigned int num_ports;
2272 int i, j;
2273 int cap_count = 0;
2274 u32 cap_start;
2275 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2276
2277 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
2278 xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
2279 flags, dev_to_node(dev));
2280 if (!xhci->hw_ports)
2281 return -ENOMEM;
2282
2283 for (i = 0; i < num_ports; i++) {
2284 xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
2285 NUM_PORT_REGS * i;
2286 xhci->hw_ports[i].hw_portnum = i;
2287 }
2288
2289 xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
2290 dev_to_node(dev));
2291 if (!xhci->rh_bw)
2292 return -ENOMEM;
2293 for (i = 0; i < num_ports; i++) {
2294 struct xhci_interval_bw_table *bw_table;
2295
2296 INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
2297 bw_table = &xhci->rh_bw[i].bw_table;
2298 for (j = 0; j < XHCI_MAX_INTERVAL; j++)
2299 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
2300 }
2301 base = &xhci->cap_regs->hc_capbase;
2302
2303 cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
2304 if (!cap_start) {
2305 xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
2306 return -ENODEV;
2307 }
2308
2309 offset = cap_start;
2310
2311 while (offset) {
2312 cap_count++;
2313 offset = xhci_find_next_ext_cap(base, offset,
2314 XHCI_EXT_CAPS_PROTOCOL);
2315 }
2316
2317 xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps),
2318 flags, dev_to_node(dev));
2319 if (!xhci->ext_caps)
2320 return -ENOMEM;
2321
2322 xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
2323 flags, dev_to_node(dev));
2324 if (!xhci->port_caps)
2325 return -ENOMEM;
2326
2327 offset = cap_start;
2328
2329 while (offset) {
2330 xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
2331 if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
2332 num_ports)
2333 break;
2334 offset = xhci_find_next_ext_cap(base, offset,
2335 XHCI_EXT_CAPS_PROTOCOL);
2336 }
2337 if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) {
2338 xhci_warn(xhci, "No ports on the roothubs?\n");
2339 return -ENODEV;
2340 }
2341 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2342 "Found %u USB 2.0 ports and %u USB 3.0 ports.",
2343 xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports);
2344
	/* Place limits on the number of roothub ports so that the hub
	 * descriptors aren't longer than the USB core will allocate.
	 */
2348 if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) {
2349 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2350 "Limiting USB 3.0 roothub ports to %u.",
2351 USB_SS_MAXPORTS);
2352 xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS;
2353 }
2354 if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) {
2355 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2356 "Limiting USB 2.0 roothub ports to %u.",
2357 USB_MAXCHILDREN);
2358 xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
2359 }
2360
	/*
	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
	 * Not sure how the USB core will handle a hub with no ports...
	 */
2366 xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
2367 xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);
2368
2369 return 0;
2370 }
2371
2372 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2373 {
2374 dma_addr_t dma;
2375 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2376 unsigned int val, val2;
2377 u64 val_64;
2378 u32 page_size, temp;
2379 int i, ret;
2380
2381 INIT_LIST_HEAD(&xhci->cmd_list);
2382
	/* init command timeout work */
2384 INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
2385 init_completion(&xhci->cmd_ring_stop_completion);
2386
2387 page_size = readl(&xhci->op_regs->page_size);
2388 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2389 "Supported page size register = 0x%x", page_size);
2390 for (i = 0; i < 16; i++) {
2391 if ((0x1 & page_size) != 0)
2392 break;
2393 page_size = page_size >> 1;
2394 }
2395 if (i < 16)
2396 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2397 "Supported page size of %iK", (1 << (i+12)) / 1024);
2398 else
2399 xhci_warn(xhci, "WARN: no supported page size\n");
2400
2401 xhci->page_shift = 12;
2402 xhci->page_size = 1 << xhci->page_shift;
2403 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2404 "HCD page size set to %iK", xhci->page_size / 1024);
2405
	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
2410 val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
2411 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2412 "// xHC can handle at most %d device slots.", val);
2413 val2 = readl(&xhci->op_regs->config_reg);
2414 val |= (val2 & ~HCS_SLOTS_MASK);
2415 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2416 "// Setting Max device slots reg = 0x%x.", val);
2417 writel(val, &xhci->op_regs->config_reg);
2418
	/*
	 * xHCI section 5.4.6: the Device Context Base Address Array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
2423 xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
2424 flags);
2425 if (!xhci->dcbaa)
2426 goto fail;
2427 xhci->dcbaa->dma = dma;
2428 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2429 "// Device context base array address = 0x%llx (DMA), %p (virt)",
2430 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
2431 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
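	/*
	 * Layout note (see also scratchpad_alloc() later in this file): the
	 * DCBAA is an array of 64-bit device context pointers indexed by
	 * slot ID, and entry 0 is reserved; when the controller requests
	 * scratchpad buffers, scratchpad_alloc() points entry 0 at the
	 * scratchpad buffer array.
	 */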
2432
	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments
	 * and our use of dma addresses in the trb_address_map radix tree needs
	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
	 */
2440 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
2441 TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
2442
	/* 2112 bytes covers the largest case: a 64-byte-context input context
	 * (1 input control context + 32 device context entries, 33 * 64).
	 */
2444 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
2445 2112, 64, xhci->page_size);
2446 if (!xhci->segment_pool || !xhci->device_pool)
2447 goto fail;
2448
	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
2452 xhci->small_streams_pool =
2453 dma_pool_create("xHCI 256 byte stream ctx arrays",
2454 dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
2455 xhci->medium_streams_pool =
2456 dma_pool_create("xHCI 1KB stream ctx arrays",
2457 dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);

	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with dma_alloc_coherent() instead of coming
	 * from these pools.
	 */
2462 if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
2463 goto fail;
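	/*
	 * Sizing note (a sketch based on the stream context allocator in
	 * this file): each stream context entry is 16 bytes, so the
	 * 256-byte small pool holds arrays of up to 16 contexts and the
	 * 1KB medium pool arrays of up to 64; anything larger bypasses
	 * the pools entirely.
	 */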
2464
	/* Set up the command ring to have one segment for now. */
2466 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
2467 if (!xhci->cmd_ring)
2468 goto fail;
2469 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2470 "Allocated command ring at %p", xhci->cmd_ring);
2471 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
2472 (unsigned long long)xhci->cmd_ring->first_seg->dma);
2473
	/* Set the address in the Command Ring Control register */
2475 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
2476 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
2477 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
2478 xhci->cmd_ring->cycle_state;
2479 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2480 "// Setting command ring address to 0x%016llx", val_64);
2481 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
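	/*
	 * Worked example (illustrative values, CMD_RING_RSVD_BITS == 0x3f
	 * per xhci.h): for a first segment at DMA 0x1fffd000 and
	 * cycle_state == 1, the value written is
	 * (old & 0x3f) | (0x1fffd000 & ~0x3fULL) | 1 -- the 64-byte
	 * aligned segment address lands in the pointer field, the Ring
	 * Cycle State bit is set, and the remaining low control bits are
	 * carried over from the register read-back.
	 */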
2482
2483 xhci->lpm_command = xhci_alloc_command_with_ctx(xhci, true, flags);
2484 if (!xhci->lpm_command)
2485 goto fail;
2486
	/* Reserve one command ring TRB for disabling LPM.
	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
	 * disabling LPM, we only need to reserve one TRB for all devices.
	 */
2491 xhci->cmd_ring_reserved_trbs++;
2492
2493 val = readl(&xhci->cap_regs->db_off);
2494 val &= DBOFF_MASK;
2495 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2496 "// Doorbell array is located at offset 0x%x"
2497 " from cap regs base addr", val);
2498 xhci->dba = (void __iomem *) xhci->cap_regs + val;
2499
2500 xhci->ir_set = &xhci->run_regs->ir_set[0];
2501
	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
2506 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
2507 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
2508 0, flags);
2509 if (!xhci->event_ring)
2510 goto fail;
2511 if (xhci_check_trb_in_td_math(xhci) < 0)
2512 goto fail;
2513
2514 ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
2515 if (ret)
2516 goto fail;
2517
	/* set ERST count with the number of entries in the segment table */
2519 val = readl(&xhci->ir_set->erst_size);
2520 val &= ERST_SIZE_MASK;
2521 val |= ERST_NUM_SEGS;
2522 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2523 "// Write ERST size = %i to ir_set 0 (some bits preserved)",
2524 val);
2525 writel(val, &xhci->ir_set->erst_size);
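	/*
	 * With ERST_NUM_SEGS == 1 and ERST_SIZE_MASK selecting the upper
	 * half of the register (both from xhci.h), this read-modify-write
	 * preserves the reserved high bits and programs an event ring
	 * segment table size of one entry.
	 */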
2526
2527 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2528 "// Set ERST entries to point to event ring.");
2529
2530 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2531 "// Set ERST base address for ir_set 0 = 0x%llx",
2532 (unsigned long long)xhci->erst.erst_dma_addr);
2533 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
2534 val_64 &= ERST_PTR_MASK;
2535 val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
2536 xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
2537
	/* Set the event ring dequeue address */
2539 xhci_set_hc_event_deq(xhci);
2540 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2541 "Wrote ERST address to ir_set 0.");
2542
	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1 ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
2548 for (i = 0; i < MAX_HC_SLOTS; i++)
2549 xhci->devs[i] = NULL;
2550 for (i = 0; i < USB_MAXCHILDREN; i++) {
2551 xhci->usb2_rhub.bus_state.resume_done[i] = 0;
2552 xhci->usb3_rhub.bus_state.resume_done[i] = 0;
2553
2554 init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
2555 init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]);
2556 }
2557
2558 if (scratchpad_alloc(xhci, flags))
2559 goto fail;
2560 if (xhci_setup_port_arrays(xhci, flags))
2561 goto fail;
2562
	/* Enable USB 3.0 device notifications for function remote wake, which
	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
	 * U3 (device suspend).
	 */
2567 temp = readl(&xhci->op_regs->dev_notification);
2568 temp &= ~DEV_NOTE_MASK;
2569 temp |= DEV_NOTE_FWAKE;
2570 writel(temp, &xhci->op_regs->dev_notification);
2571
2572 return 0;
2573
2574 fail:
2575 xhci_halt(xhci);
2576 xhci_reset(xhci);
2577 xhci_mem_cleanup(xhci);
2578 return -ENOMEM;
2579 }