This source file includes the following definitions; a short usage sketch of the ring API follows the list.
- ring_interrupt_index
- ring_interrupt_active
- nhi_disable_interrupts
- ring_desc_base
- ring_options_base
- ring_iowrite_cons
- ring_iowrite_prod
- ring_iowrite32desc
- ring_iowrite64desc
- ring_iowrite32options
- ring_full
- ring_empty
- ring_write_descriptors
- ring_work
- __tb_ring_enqueue
- tb_ring_poll
- __ring_interrupt_mask
- __ring_interrupt
- tb_ring_poll_complete
- ring_msix
- ring_request_msix
- ring_release_msix
- nhi_alloc_hop
- tb_ring_alloc
- tb_ring_alloc_tx
- tb_ring_alloc_rx
- tb_ring_start
- tb_ring_stop
- tb_ring_free
- nhi_mailbox_cmd
- nhi_mailbox_mode
- nhi_interrupt_work
- nhi_msi
- __nhi_suspend_noirq
- nhi_suspend_noirq
- nhi_wake_supported
- nhi_poweroff_noirq
- nhi_enable_int_throttling
- nhi_resume_noirq
- nhi_suspend
- nhi_complete
- nhi_runtime_suspend
- nhi_runtime_resume
- nhi_shutdown
- nhi_init_msi
- nhi_imr_valid
- nhi_probe
- nhi_remove
- nhi_init
- nhi_unload
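
The entries above form the NHI ring API that the rest of the Thunderbolt driver builds on. Below is a minimal sketch, not taken from this file, of how a caller might drive a TX ring using only the entry points listed here; my_tx_done() and my_send_one() are hypothetical names, and the caller is assumed to have DMA-mapped the buffer itself, since ring_write_descriptors() copies frame->buffer_phy straight into the descriptor.

/* Hypothetical caller -- illustrates the API flow only. The types and
 * prototypes come from the headers this file already includes. */
static void my_tx_done(struct tb_ring *ring, struct ring_frame *frame,
		       bool canceled)
{
	/* Runs from ring_work() when the frame completes, or with
	 * canceled == true if the ring was stopped first. */
}

static int my_send_one(struct tb_nhi *nhi, struct ring_frame *frame)
{
	struct tb_ring *ring;
	int ret;

	ring = tb_ring_alloc_tx(nhi, -1, 16, 0);  /* hop < 0: nhi_alloc_hop() picks a free HopID */
	if (!ring)
		return -ENOMEM;

	tb_ring_start(ring);

	frame->callback = my_tx_done;
	ret = __tb_ring_enqueue(ring, frame);     /* -ESHUTDOWN if the ring is not running */

	tb_ring_stop(ring);                       /* cancels pending frames, runs callbacks */
	tb_ring_free(ring);
	return ret;
}

The same alloc/start/enqueue/stop/free order applies to RX rings allocated with tb_ring_alloc_rx().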
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - NHI driver
4  *
5  * The NHI (native host interface) is the pci device that allows us to send
6  * and receive frames from the thunderbolt bus.
7  *
8  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
9  * Copyright (C) 2018, Intel Corporation
10  */
11 
12 #include <linux/pm_runtime.h>
13 #include <linux/slab.h>
14 #include <linux/errno.h>
15 #include <linux/pci.h>
16 #include <linux/interrupt.h>
17 #include <linux/module.h>
18 #include <linux/delay.h>
19 #include <linux/property.h>
20
21 #include "nhi.h"
22 #include "nhi_regs.h"
23 #include "tb.h"
24
25 #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
26 /*
27  * Used to enable the end-to-end workaround for missing RX packets. Do not
28  * use this ring for anything else.
29  */
30 
31 #define RING_E2E_UNUSED_HOPID 2
32 #define RING_FIRST_USABLE_HOPID TB_PATH_MIN_HOPID
33
34 /*
35  * Minimal number of vectors when we use MSI-X. Two are required for the
36  * control channel Rx/Tx and the rest are for cross domain DMA paths.
37  */
38 #define MSIX_MIN_VECS 6
39 #define MSIX_MAX_VECS 16
40
41 #define NHI_MAILBOX_TIMEOUT 500 /* ms */
42
43 static int ring_interrupt_index(struct tb_ring *ring)
44 {
45 int bit = ring->hop;
46 if (!ring->is_tx)
47 bit += ring->nhi->hop_count;
48 return bit;
49 }
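/*
 * Worked example (numbers chosen only for illustration): with hop_count == 12,
 * the RX ring on hop 3 gets bit 3 + 12 = 15. ring_interrupt_active() and
 * __ring_interrupt_mask() below then map that to dword 15 / 32 == 0 of
 * REG_RING_INTERRUPT_BASE and bit 15 % 32 == 15 within it.
 */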
50
51 /*
52  * ring_interrupt_active() - activate/deactivate interrupts for a single ring
53  *
54  * ring->nhi->lock must be held.
55  */
56 static void ring_interrupt_active(struct tb_ring *ring, bool active)
57 {
58 int reg = REG_RING_INTERRUPT_BASE +
59 ring_interrupt_index(ring) / 32 * 4;
60 int bit = ring_interrupt_index(ring) & 31;
61 int mask = 1 << bit;
62 u32 old, new;
63
64 if (ring->irq > 0) {
65 u32 step, shift, ivr, misc;
66 void __iomem *ivr_base;
67 int index;
68
69 if (ring->is_tx)
70 index = ring->hop;
71 else
72 index = ring->hop + ring->nhi->hop_count;
73
74 /*
75  * Ask the hardware to clear interrupt status bits automatically
76  * since we already know which interrupt was triggered.
77  */
78 misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
79 if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
80 misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
81 iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
82 }
83
84 ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
85 step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
86 shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
87 ivr = ioread32(ivr_base + step);
88 ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
89 if (active)
90 ivr |= ring->vector << shift;
91 iowrite32(ivr, ivr_base + step);
92 }
93
94 old = ioread32(ring->nhi->iobase + reg);
95 if (active)
96 new = old | mask;
97 else
98 new = old & ~mask;
99
100 dev_dbg(&ring->nhi->pdev->dev,
101 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
102 active ? "enabling" : "disabling", reg, bit, old, new);
103
104 if (new == old)
105 dev_WARN(&ring->nhi->pdev->dev,
106 "interrupt for %s %d is already %s\n",
107 RING_TYPE(ring), ring->hop,
108 active ? "enabled" : "disabled");
109 iowrite32(new, ring->nhi->iobase + reg);
110 }
111
112 /*
113  * nhi_disable_interrupts() - disable interrupts for all rings
114  *
115  * Use only during init and shutdown.
116  */
117 static void nhi_disable_interrupts(struct tb_nhi *nhi)
118 {
119 int i = 0;
120 /* disable interrupts */
121 for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
122 iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
123
124 /* clear interrupt status bits */
125 for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
126 ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
127 }
128
129 /* ring helper methods */
130 
131 static void __iomem *ring_desc_base(struct tb_ring *ring)
132 {
133 void __iomem *io = ring->nhi->iobase;
134 io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
135 io += ring->hop * 16;
136 return io;
137 }
138
139 static void __iomem *ring_options_base(struct tb_ring *ring)
140 {
141 void __iomem *io = ring->nhi->iobase;
142 io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
143 io += ring->hop * 32;
144 return io;
145 }
146
147 static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
148 {
149 /*
150  * The other 16 bits in the register are read-only and writes to them
151  * are ignored by the hardware, so we can save one ioread32() by
152  * filling the read-only bits with zeroes.
153  */
154 iowrite32(cons, ring_desc_base(ring) + 8);
155 }
156
157 static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
158 {
159 /* See ring_iowrite_cons() above for explanation */
160 iowrite32(prod << 16, ring_desc_base(ring) + 8);
161 }
162
163 static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
164 {
165 iowrite32(value, ring_desc_base(ring) + offset);
166 }
167
168 static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
169 {
170 iowrite32(value, ring_desc_base(ring) + offset);
171 iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
172 }
173
174 static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
175 {
176 iowrite32(value, ring_options_base(ring) + offset);
177 }
178
179 static bool ring_full(struct tb_ring *ring)
180 {
181 return ((ring->head + 1) % ring->size) == ring->tail;
182 }
183
184 static bool ring_empty(struct tb_ring *ring)
185 {
186 return ring->head == ring->tail;
187 }
188
189 /*
190  * ring_write_descriptors() - post frames queued on ring->queue to the hardware
191  *
192  * Caller must hold ring->lock.
193  */
194 static void ring_write_descriptors(struct tb_ring *ring)
195 {
196 struct ring_frame *frame, *n;
197 struct ring_desc *descriptor;
198 list_for_each_entry_safe(frame, n, &ring->queue, list) {
199 if (ring_full(ring))
200 break;
201 list_move_tail(&frame->list, &ring->in_flight);
202 descriptor = &ring->descriptors[ring->head];
203 descriptor->phys = frame->buffer_phy;
204 descriptor->time = 0;
205 descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
206 if (ring->is_tx) {
207 descriptor->length = frame->size;
208 descriptor->eof = frame->eof;
209 descriptor->sof = frame->sof;
210 }
211 ring->head = (ring->head + 1) % ring->size;
212 if (ring->is_tx)
213 ring_iowrite_prod(ring, ring->head);
214 else
215 ring_iowrite_cons(ring, ring->head);
216 }
217 }
218
219 /*
220  * ring_work() - progress completed frames
221  *
222  * If the ring is shutting down then all frames are marked as canceled and
223  * their callbacks are invoked.
224  *
225  * Otherwise we collect all completed frames from the ring buffer, write new
226  * frames to the ring buffer and invoke the callbacks for the completed frames.
227  */
228 static void ring_work(struct work_struct *work)
229 {
230 struct tb_ring *ring = container_of(work, typeof(*ring), work);
231 struct ring_frame *frame;
232 bool canceled = false;
233 unsigned long flags;
234 LIST_HEAD(done);
235
236 spin_lock_irqsave(&ring->lock, flags);
237
238 if (!ring->running) {
239 /* Driver was closed, drop all pending work */
240 list_splice_tail_init(&ring->in_flight, &done);
241 list_splice_tail_init(&ring->queue, &done);
242 canceled = true;
243 goto invoke_callback;
244 }
245
246 while (!ring_empty(ring)) {
247 if (!(ring->descriptors[ring->tail].flags
248 & RING_DESC_COMPLETED))
249 break;
250 frame = list_first_entry(&ring->in_flight, typeof(*frame),
251 list);
252 list_move_tail(&frame->list, &done);
253 if (!ring->is_tx) {
254 frame->size = ring->descriptors[ring->tail].length;
255 frame->eof = ring->descriptors[ring->tail].eof;
256 frame->sof = ring->descriptors[ring->tail].sof;
257 frame->flags = ring->descriptors[ring->tail].flags;
258 }
259 ring->tail = (ring->tail + 1) % ring->size;
260 }
261 ring_write_descriptors(ring);
262
263 invoke_callback:
264
265 spin_unlock_irqrestore(&ring->lock, flags);
266 while (!list_empty(&done)) {
267 frame = list_first_entry(&done, typeof(*frame), list);
268 /*
269  * The callback may reenqueue or delete frame.
270  * Do not hold on to it.
271  */
272 list_del_init(&frame->list);
273 if (frame->callback)
274 frame->callback(ring, frame, canceled);
275 }
276 }
277
278 int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
279 {
280 unsigned long flags;
281 int ret = 0;
282
283 spin_lock_irqsave(&ring->lock, flags);
284 if (ring->running) {
285 list_add_tail(&frame->list, &ring->queue);
286 ring_write_descriptors(ring);
287 } else {
288 ret = -ESHUTDOWN;
289 }
290 spin_unlock_irqrestore(&ring->lock, flags);
291 return ret;
292 }
293 EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
294
295 /**
296  * tb_ring_poll() - Poll one completed frame from the ring
297  * @ring: Ring to poll
298  *
299  * This function can be called when the @start_poll callback of the @ring
300  * has been called. It will read one completed frame from the ring and
301  * return it to the caller. Returns %NULL if there are no more completed
302  * frames.
303  */
304 struct ring_frame *tb_ring_poll(struct tb_ring *ring)
305 {
306 struct ring_frame *frame = NULL;
307 unsigned long flags;
308
309 spin_lock_irqsave(&ring->lock, flags);
310 if (!ring->running)
311 goto unlock;
312 if (ring_empty(ring))
313 goto unlock;
314
315 if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
316 frame = list_first_entry(&ring->in_flight, typeof(*frame),
317 list);
318 list_del_init(&frame->list);
319
320 if (!ring->is_tx) {
321 frame->size = ring->descriptors[ring->tail].length;
322 frame->eof = ring->descriptors[ring->tail].eof;
323 frame->sof = ring->descriptors[ring->tail].sof;
324 frame->flags = ring->descriptors[ring->tail].flags;
325 }
326
327 ring->tail = (ring->tail + 1) % ring->size;
328 }
329
330 unlock:
331 spin_unlock_irqrestore(&ring->lock, flags);
332 return frame;
333 }
334 EXPORT_SYMBOL_GPL(tb_ring_poll);
335
336 static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
337 {
338 int idx = ring_interrupt_index(ring);
339 int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
340 int bit = idx % 32;
341 u32 val;
342
343 val = ioread32(ring->nhi->iobase + reg);
344 if (mask)
345 val &= ~BIT(bit);
346 else
347 val |= BIT(bit);
348 iowrite32(val, ring->nhi->iobase + reg);
349 }
350
351 /* Both ring->nhi->lock and ring->lock must be held */
352 static void __ring_interrupt(struct tb_ring *ring)
353 {
354 if (!ring->running)
355 return;
356
357 if (ring->start_poll) {
358 __ring_interrupt_mask(ring, true);
359 ring->start_poll(ring->poll_data);
360 } else {
361 schedule_work(&ring->work);
362 }
363 }
364 
365 /**
366  * tb_ring_poll_complete() - Re-start interrupt for the ring
367  * @ring: Ring to re-start the interrupt
368  *
369  * This will re-start (unmask) the ring interrupt once the user is done
370  * with polling.
371  */
372 void tb_ring_poll_complete(struct tb_ring *ring)
373 {
374 unsigned long flags;
375
376 spin_lock_irqsave(&ring->nhi->lock, flags);
377 spin_lock(&ring->lock);
378 if (ring->start_poll)
379 __ring_interrupt_mask(ring, false);
380 spin_unlock(&ring->lock);
381 spin_unlock_irqrestore(&ring->nhi->lock, flags);
382 }
383 EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
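/*
 * A minimal sketch (not part of this file) of the deferred-polling mode set up
 * through tb_ring_alloc_rx(): the hypothetical my_rx_wakeup() is passed as
 * @start_poll (with a work_struct as @poll_data), and my_rx_drain() later
 * empties the ring from process context before re-enabling the interrupt.
 */
static void my_rx_wakeup(void *data)
{
	/*
	 * Called from __ring_interrupt() with the ring interrupt already
	 * masked; just kick the consumer (here: a workqueue item).
	 */
	schedule_work(data);
}

static void my_rx_drain(struct tb_ring *ring)
{
	struct ring_frame *frame;

	while ((frame = tb_ring_poll(ring)) != NULL) {
		/*
		 * frame->size/eof/sof/flags were copied from the completed
		 * descriptor; consume the buffer and re-post it with
		 * __tb_ring_enqueue() to keep the ring full.
		 */
	}

	tb_ring_poll_complete(ring);	/* unmask the ring interrupt */
}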
384
385 static irqreturn_t ring_msix(int irq, void *data)
386 {
387 struct tb_ring *ring = data;
388
389 spin_lock(&ring->nhi->lock);
390 spin_lock(&ring->lock);
391 __ring_interrupt(ring);
392 spin_unlock(&ring->lock);
393 spin_unlock(&ring->nhi->lock);
394
395 return IRQ_HANDLED;
396 }
397
398 static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
399 {
400 struct tb_nhi *nhi = ring->nhi;
401 unsigned long irqflags;
402 int ret;
403
404 if (!nhi->pdev->msix_enabled)
405 return 0;
406
407 ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
408 if (ret < 0)
409 return ret;
410
411 ring->vector = ret;
412
413 ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
414 if (ring->irq < 0)
415 return ring->irq;
416
417 irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
418 return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
419 }
420
421 static void ring_release_msix(struct tb_ring *ring)
422 {
423 if (ring->irq <= 0)
424 return;
425
426 free_irq(ring->irq, ring);
427 ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
428 ring->vector = 0;
429 ring->irq = 0;
430 }
431
432 static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
433 {
434 int ret = 0;
435
436 spin_lock_irq(&nhi->lock);
437
438 if (ring->hop < 0) {
439 unsigned int i;
440
441 /*
442  * Automatically allocate HopID from the non-reserved
443  * range RING_FIRST_USABLE_HOPID .. hop_count - 1.
444  */
445 for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
446 if (ring->is_tx) {
447 if (!nhi->tx_rings[i]) {
448 ring->hop = i;
449 break;
450 }
451 } else {
452 if (!nhi->rx_rings[i]) {
453 ring->hop = i;
454 break;
455 }
456 }
457 }
458 }
459
460 if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
461 dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
462 ret = -EINVAL;
463 goto err_unlock;
464 }
465 if (ring->is_tx && nhi->tx_rings[ring->hop]) {
466 dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
467 ring->hop);
468 ret = -EBUSY;
469 goto err_unlock;
470 } else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
471 dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
472 ring->hop);
473 ret = -EBUSY;
474 goto err_unlock;
475 }
476
477 if (ring->is_tx)
478 nhi->tx_rings[ring->hop] = ring;
479 else
480 nhi->rx_rings[ring->hop] = ring;
481
482 err_unlock:
483 spin_unlock_irq(&nhi->lock);
484
485 return ret;
486 }
487
488 static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
489 bool transmit, unsigned int flags,
490 u16 sof_mask, u16 eof_mask,
491 void (*start_poll)(void *),
492 void *poll_data)
493 {
494 struct tb_ring *ring = NULL;
495
496 dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
497 transmit ? "TX" : "RX", hop, size);
498
499 /* Tx Ring 2 is reserved for E2E workaround */
500 if (transmit && hop == RING_E2E_UNUSED_HOPID)
501 return NULL;
502
503 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
504 if (!ring)
505 return NULL;
506
507 spin_lock_init(&ring->lock);
508 INIT_LIST_HEAD(&ring->queue);
509 INIT_LIST_HEAD(&ring->in_flight);
510 INIT_WORK(&ring->work, ring_work);
511
512 ring->nhi = nhi;
513 ring->hop = hop;
514 ring->is_tx = transmit;
515 ring->size = size;
516 ring->flags = flags;
517 ring->sof_mask = sof_mask;
518 ring->eof_mask = eof_mask;
519 ring->head = 0;
520 ring->tail = 0;
521 ring->running = false;
522 ring->start_poll = start_poll;
523 ring->poll_data = poll_data;
524
525 ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
526 size * sizeof(*ring->descriptors),
527 &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
528 if (!ring->descriptors)
529 goto err_free_ring;
530
531 if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
532 goto err_free_descs;
533
534 if (nhi_alloc_hop(nhi, ring))
535 goto err_release_msix;
536
537 return ring;
538
539 err_release_msix:
540 ring_release_msix(ring);
541 err_free_descs:
542 dma_free_coherent(&ring->nhi->pdev->dev,
543 ring->size * sizeof(*ring->descriptors),
544 ring->descriptors, ring->descriptors_dma);
545 err_free_ring:
546 kfree(ring);
547
548 return NULL;
549 }
550
551 /**
552  * tb_ring_alloc_tx() - Allocate DMA ring for transmit
553  * @nhi: Pointer to the NHI the ring is to be allocated
554  * @hop: HopID (ring) to allocate
555  * @size: Number of entries in the ring
556  * @flags: Flags for the ring
557  */
558 struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
559 unsigned int flags)
560 {
561 return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
562 }
563 EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
564
565 /**
566  * tb_ring_alloc_rx() - Allocate DMA ring for receive
567  * @nhi: Pointer to the NHI the ring is to be allocated
568  * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
569  * @size: Number of entries in the ring
570  * @flags: Flags for the ring
571  * @sof_mask: Mask of PDF values that start a frame
572  * @eof_mask: Mask of PDF values that end a frame
573  * @start_poll: If not %NULL the ring will call this function when an
574  *		interrupt is triggered and masked, instead of callback
575  *		in each Rx frame.
576  * @poll_data: Optional data passed to @start_poll
577  */
578 struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
579 unsigned int flags, u16 sof_mask, u16 eof_mask,
580 void (*start_poll)(void *), void *poll_data)
581 {
582 return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
583 start_poll, poll_data);
584 }
585 EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
586
587 /**
588  * tb_ring_start() - enable a ring
589  *
590  * Must not be invoked in parallel with tb_ring_stop().
591  */
592 void tb_ring_start(struct tb_ring *ring)
593 {
594 u16 frame_size;
595 u32 flags;
596
597 spin_lock_irq(&ring->nhi->lock);
598 spin_lock(&ring->lock);
599 if (ring->nhi->going_away)
600 goto err;
601 if (ring->running) {
602 dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
603 goto err;
604 }
605 dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
606 RING_TYPE(ring), ring->hop);
607
608 if (ring->flags & RING_FLAG_FRAME) {
609 /* Means 4096 */
610 frame_size = 0;
611 flags = RING_FLAG_ENABLE;
612 } else {
613 frame_size = TB_FRAME_SIZE;
614 flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
615 }
616
617 if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
618 u32 hop;
619
620 /*
621  * RX rings that enable end-to-end flow control transfer their RX
622  * credits to an otherwise unused HopID (RING_E2E_UNUSED_HOPID) so
623  * that packets are not dropped while the receiver catches up.
624  */
625 hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT;
626 hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
627 flags |= hop | RING_FLAG_E2E_FLOW_CONTROL;
628 }
629
630 ring_iowrite64desc(ring, ring->descriptors_dma, 0);
631 if (ring->is_tx) {
632 ring_iowrite32desc(ring, ring->size, 12);
633 ring_iowrite32options(ring, 0, 4);
634 ring_iowrite32options(ring, flags, 0);
635 } else {
636 u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;
637
638 ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
639 ring_iowrite32options(ring, sof_eof_mask, 4);
640 ring_iowrite32options(ring, flags, 0);
641 }
642 ring_interrupt_active(ring, true);
643 ring->running = true;
644 err:
645 spin_unlock(&ring->lock);
646 spin_unlock_irq(&ring->nhi->lock);
647 }
648 EXPORT_SYMBOL_GPL(tb_ring_start);
649 
650 /**
651  * tb_ring_stop() - shutdown a ring
652  *
653  * Must not be invoked from a callback.
654  *
655  * This method will disable the ring. Further calls to
656  * __tb_ring_enqueue() will return -ESHUTDOWN until the ring has been
657  * started again.
658  *
659  * All enqueued frames will be canceled and their callbacks will be
660  * invoked with the canceled argument set to true (on the callback thread).
661  * This method returns only after all callback invocations have finished.
662  */
663 void tb_ring_stop(struct tb_ring *ring)
664 {
665 spin_lock_irq(&ring->nhi->lock);
666 spin_lock(&ring->lock);
667 dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
668 RING_TYPE(ring), ring->hop);
669 if (ring->nhi->going_away)
670 goto err;
671 if (!ring->running) {
672 dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
673 RING_TYPE(ring), ring->hop);
674 goto err;
675 }
676 ring_interrupt_active(ring, false);
677
678 ring_iowrite32options(ring, 0, 0);
679 ring_iowrite64desc(ring, 0, 0);
680 ring_iowrite32desc(ring, 0, 8);
681 ring_iowrite32desc(ring, 0, 12);
682 ring->head = 0;
683 ring->tail = 0;
684 ring->running = false;
685
686 err:
687 spin_unlock(&ring->lock);
688 spin_unlock_irq(&ring->nhi->lock);
689
690 /*
691  * schedule ring->work to invoke callbacks on all remaining frames.
692  */
693 schedule_work(&ring->work);
694 flush_work(&ring->work);
695 }
696 EXPORT_SYMBOL_GPL(tb_ring_stop);
697
698 /*
699  * tb_ring_free() - free ring
700  *
701  * When this method returns all invocations of ring->callback will have
702  * finished.
703  *
704  * Ring must be stopped.
705  *
706  * Must NOT be called from ring_frame->callback!
707  */
708 void tb_ring_free(struct tb_ring *ring)
709 {
710 spin_lock_irq(&ring->nhi->lock);
711 /*
712  * Dissociate the ring from the NHI. This also ensures that
713  * nhi_interrupt_work cannot reschedule ring->work.
714  */
715 if (ring->is_tx)
716 ring->nhi->tx_rings[ring->hop] = NULL;
717 else
718 ring->nhi->rx_rings[ring->hop] = NULL;
719
720 if (ring->running) {
721 dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
722 RING_TYPE(ring), ring->hop);
723 }
724 spin_unlock_irq(&ring->nhi->lock);
725
726 ring_release_msix(ring);
727
728 dma_free_coherent(&ring->nhi->pdev->dev,
729 ring->size * sizeof(*ring->descriptors),
730 ring->descriptors, ring->descriptors_dma);
731
732 ring->descriptors = NULL;
733 ring->descriptors_dma = 0;
734
735
736 dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
737 ring->hop);
738
739 /*
740  * ring->work can no longer be scheduled (it is scheduled only
741  * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
742  * to finish before freeing the ring.
743  */
744 flush_work(&ring->work);
745 kfree(ring);
746 }
747 EXPORT_SYMBOL_GPL(tb_ring_free);
748
749 /**
750  * nhi_mailbox_cmd() - Send a command through NHI mailbox
751  * @nhi: Pointer to the NHI structure
752  * @cmd: Command to send
753  * @data: Data to be sent with the command
754  *
755  * Sends mailbox command to the firmware running on NHI. Returns %0 in
756  * case of success and negative errno in case of failure.
757  */
758 int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
759 {
760 ktime_t timeout;
761 u32 val;
762
763 iowrite32(data, nhi->iobase + REG_INMAIL_DATA);
764
765 val = ioread32(nhi->iobase + REG_INMAIL_CMD);
766 val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
767 val |= REG_INMAIL_OP_REQUEST | cmd;
768 iowrite32(val, nhi->iobase + REG_INMAIL_CMD);
769
770 timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
771 do {
772 val = ioread32(nhi->iobase + REG_INMAIL_CMD);
773 if (!(val & REG_INMAIL_OP_REQUEST))
774 break;
775 usleep_range(10, 20);
776 } while (ktime_before(ktime_get(), timeout));
777
778 if (val & REG_INMAIL_OP_REQUEST)
779 return -ETIMEDOUT;
780 if (val & REG_INMAIL_ERROR)
781 return -EIO;
782
783 return 0;
784 }
785
786 /**
787  * nhi_mailbox_mode() - Return current firmware operation mode
788  * @nhi: Pointer to the NHI structure
789  *
790  * The function reads current firmware operation mode using NHI mailbox
791  * registers and returns it to the caller.
792  */
793 enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
794 {
795 u32 val;
796
797 val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
798 val &= REG_OUTMAIL_CMD_OPMODE_MASK;
799 val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;
800
801 return (enum nhi_fw_mode)val;
802 }
803
804 static void nhi_interrupt_work(struct work_struct *work)
805 {
806 struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
807 int value = 0;
808 int bit;
809 int hop = -1;
810 int type = 0;
811 struct tb_ring *ring;
812
813 spin_lock_irq(&nhi->lock);
814
815 /*
816  * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
817  * (TX, RX, RX overflow). We iterate over the bits and read a new
818  * dword as required. The registers are cleared on read.
819  */
820 for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
821 if (bit % 32 == 0)
822 value = ioread32(nhi->iobase
823 + REG_RING_NOTIFY_BASE
824 + 4 * (bit / 32));
825 if (++hop == nhi->hop_count) {
826 hop = 0;
827 type++;
828 }
829 if ((value & (1 << (bit % 32))) == 0)
830 continue;
831 if (type == 2) {
832 dev_warn(&nhi->pdev->dev,
833 "RX overflow for ring %d\n",
834 hop);
835 continue;
836 }
837 if (type == 0)
838 ring = nhi->tx_rings[hop];
839 else
840 ring = nhi->rx_rings[hop];
841 if (ring == NULL) {
842 dev_warn(&nhi->pdev->dev,
843 "got interrupt for inactive %s ring %d\n",
844 type ? "RX" : "TX",
845 hop);
846 continue;
847 }
848
849 spin_lock(&ring->lock);
850 __ring_interrupt(ring);
851 spin_unlock(&ring->lock);
852 }
853 spin_unlock_irq(&nhi->lock);
854 }
855
856 static irqreturn_t nhi_msi(int irq, void *data)
857 {
858 struct tb_nhi *nhi = data;
859 schedule_work(&nhi->interrupt_work);
860 return IRQ_HANDLED;
861 }
862
863 static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
864 {
865 struct pci_dev *pdev = to_pci_dev(dev);
866 struct tb *tb = pci_get_drvdata(pdev);
867 struct tb_nhi *nhi = tb->nhi;
868 int ret;
869
870 ret = tb_domain_suspend_noirq(tb);
871 if (ret)
872 return ret;
873
874 if (nhi->ops && nhi->ops->suspend_noirq) {
875 ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
876 if (ret)
877 return ret;
878 }
879
880 return 0;
881 }
882
883 static int nhi_suspend_noirq(struct device *dev)
884 {
885 return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
886 }
887
888 static bool nhi_wake_supported(struct pci_dev *pdev)
889 {
890 u8 val;
891
892 /*
893  * If power rails are sustainable for wakeup from S4 this
894  * property is set by the BIOS.
895  */
896 if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
897 return !!val;
898
899 return true;
900 }
901
902 static int nhi_poweroff_noirq(struct device *dev)
903 {
904 struct pci_dev *pdev = to_pci_dev(dev);
905 bool wakeup;
906
907 wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
908 return __nhi_suspend_noirq(dev, wakeup);
909 }
910
911 static void nhi_enable_int_throttling(struct tb_nhi *nhi)
912 {
913 /* Throttling is specified in 256ns increments (128 us -> 500) */
914 u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
915 unsigned int i;
916
917 /*
918  * Configure interrupt throttling for all vectors even if we
919  * only use few.
920  */
921 for (i = 0; i < MSIX_MAX_VECS; i++) {
922 u32 reg = REG_INT_THROTTLING_RATE + i * 4;
923 iowrite32(throttle, nhi->iobase + reg);
924 }
925 }
926
927 static int nhi_resume_noirq(struct device *dev)
928 {
929 struct pci_dev *pdev = to_pci_dev(dev);
930 struct tb *tb = pci_get_drvdata(pdev);
931 struct tb_nhi *nhi = tb->nhi;
932 int ret;
933
934 /*
935  * Check that the device is still there. It may be that the user
936  * unplugged the last device which causes the host controller to go
937  * away on PCs.
938  */
939 if (!pci_device_is_present(pdev)) {
940 nhi->going_away = true;
941 } else {
942 if (nhi->ops && nhi->ops->resume_noirq) {
943 ret = nhi->ops->resume_noirq(nhi);
944 if (ret)
945 return ret;
946 }
947 nhi_enable_int_throttling(tb->nhi);
948 }
949
950 return tb_domain_resume_noirq(tb);
951 }
952
953 static int nhi_suspend(struct device *dev)
954 {
955 struct pci_dev *pdev = to_pci_dev(dev);
956 struct tb *tb = pci_get_drvdata(pdev);
957
958 return tb_domain_suspend(tb);
959 }
960
961 static void nhi_complete(struct device *dev)
962 {
963 struct pci_dev *pdev = to_pci_dev(dev);
964 struct tb *tb = pci_get_drvdata(pdev);
965
966 /*
967  * If we were runtime suspended when system suspend started,
968  * schedule runtime resume now. Otherwise the domain complete
969  * hook finishes the resume.
970  */
971 if (pm_runtime_suspended(&pdev->dev))
972 pm_runtime_resume(&pdev->dev);
973 else
974 tb_domain_complete(tb);
975 }
976
977 static int nhi_runtime_suspend(struct device *dev)
978 {
979 struct pci_dev *pdev = to_pci_dev(dev);
980 struct tb *tb = pci_get_drvdata(pdev);
981 struct tb_nhi *nhi = tb->nhi;
982 int ret;
983
984 ret = tb_domain_runtime_suspend(tb);
985 if (ret)
986 return ret;
987
988 if (nhi->ops && nhi->ops->runtime_suspend) {
989 ret = nhi->ops->runtime_suspend(tb->nhi);
990 if (ret)
991 return ret;
992 }
993 return 0;
994 }
995
996 static int nhi_runtime_resume(struct device *dev)
997 {
998 struct pci_dev *pdev = to_pci_dev(dev);
999 struct tb *tb = pci_get_drvdata(pdev);
1000 struct tb_nhi *nhi = tb->nhi;
1001 int ret;
1002
1003 if (nhi->ops && nhi->ops->runtime_resume) {
1004 ret = nhi->ops->runtime_resume(nhi);
1005 if (ret)
1006 return ret;
1007 }
1008
1009 nhi_enable_int_throttling(nhi);
1010 return tb_domain_runtime_resume(tb);
1011 }
1012
1013 static void nhi_shutdown(struct tb_nhi *nhi)
1014 {
1015 int i;
1016
1017 dev_dbg(&nhi->pdev->dev, "shutdown\n");
1018
1019 for (i = 0; i < nhi->hop_count; i++) {
1020 if (nhi->tx_rings[i])
1021 dev_WARN(&nhi->pdev->dev,
1022 "TX ring %d is still active\n", i);
1023 if (nhi->rx_rings[i])
1024 dev_WARN(&nhi->pdev->dev,
1025 "RX ring %d is still active\n", i);
1026 }
1027 nhi_disable_interrupts(nhi);
1028
1029 /* We have to release the IRQ before calling flush_work. Otherwise
1030  * an already executing IRQ handler could call schedule_work again.
1031  */
1032 if (!nhi->pdev->msix_enabled) {
1033 devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
1034 flush_work(&nhi->interrupt_work);
1035 }
1036 ida_destroy(&nhi->msix_ida);
1037
1038 if (nhi->ops && nhi->ops->shutdown)
1039 nhi->ops->shutdown(nhi);
1040 }
1041
1042 static int nhi_init_msi(struct tb_nhi *nhi)
1043 {
1044 struct pci_dev *pdev = nhi->pdev;
1045 int res, irq, nvec;
1046
1047 /* In case someone left them on. */
1048 nhi_disable_interrupts(nhi);
1049
1050 nhi_enable_int_throttling(nhi);
1051
1052 ida_init(&nhi->msix_ida);
1053
1054 /*
1055  * The NHI has 16 MSI-X vectors or a single MSI. We first try to
1056  * get all MSI-X vectors and if we succeed, each ring will have
1057  * one MSI-X. If for some reason that does not work out, we
1058  * fall back to a single MSI.
1059  */
1060 nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
1061 PCI_IRQ_MSIX);
1062 if (nvec < 0) {
1063 nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
1064 if (nvec < 0)
1065 return nvec;
1066
1067 INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
1068
1069 irq = pci_irq_vector(nhi->pdev, 0);
1070 if (irq < 0)
1071 return irq;
1072
1073 res = devm_request_irq(&pdev->dev, irq, nhi_msi,
1074 IRQF_NO_SUSPEND, "thunderbolt", nhi);
1075 if (res) {
1076 dev_err(&pdev->dev, "request_irq failed, aborting\n");
1077 return res;
1078 }
1079 }
1080
1081 return 0;
1082 }
1083
1084 static bool nhi_imr_valid(struct pci_dev *pdev)
1085 {
1086 u8 val;
1087
1088 if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
1089 return !!val;
1090
1091 return true;
1092 }
1093
1094 static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1095 {
1096 struct tb_nhi *nhi;
1097 struct tb *tb;
1098 int res;
1099
1100 if (!nhi_imr_valid(pdev)) {
1101 dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
1102 return -ENODEV;
1103 }
1104
1105 res = pcim_enable_device(pdev);
1106 if (res) {
1107 dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
1108 return res;
1109 }
1110
1111 res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
1112 if (res) {
1113 dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
1114 return res;
1115 }
1116
1117 nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
1118 if (!nhi)
1119 return -ENOMEM;
1120
1121 nhi->pdev = pdev;
1122 nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
1123
1124 nhi->iobase = pcim_iomap_table(pdev)[0];
1125 nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
1126 if (nhi->hop_count != 12 && nhi->hop_count != 32)
1127 dev_warn(&pdev->dev, "unexpected hop count: %d\n",
1128 nhi->hop_count);
1129
1130 nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
1131 sizeof(*nhi->tx_rings), GFP_KERNEL);
1132 nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
1133 sizeof(*nhi->rx_rings), GFP_KERNEL);
1134 if (!nhi->tx_rings || !nhi->rx_rings)
1135 return -ENOMEM;
1136
1137 res = nhi_init_msi(nhi);
1138 if (res) {
1139 dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
1140 return res;
1141 }
1142
1143 spin_lock_init(&nhi->lock);
1144
1145 res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1146 if (res)
1147 res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1148 if (res) {
1149 dev_err(&pdev->dev, "failed to set DMA mask\n");
1150 return res;
1151 }
1152
1153 pci_set_master(pdev);
1154
1155 if (nhi->ops && nhi->ops->init) {
1156 res = nhi->ops->init(nhi);
1157 if (res)
1158 return res;
1159 }
1160
1161 tb = icm_probe(nhi);
1162 if (!tb)
1163 tb = tb_probe(nhi);
1164 if (!tb) {
1165 dev_err(&nhi->pdev->dev,
1166 "failed to determine connection manager, aborting\n");
1167 return -ENODEV;
1168 }
1169
1170 dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
1171
1172 res = tb_domain_add(tb);
1173 if (res) {
1174 /*
1175  * At this point the RX/TX rings might already have been
1176  * activated. Do a proper shutdown.
1177  */
1178 tb_domain_put(tb);
1179 nhi_shutdown(nhi);
1180 return res;
1181 }
1182 pci_set_drvdata(pdev, tb);
1183
1184 pm_runtime_allow(&pdev->dev);
1185 pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
1186 pm_runtime_use_autosuspend(&pdev->dev);
1187 pm_runtime_put_autosuspend(&pdev->dev);
1188
1189 return 0;
1190 }
1191
1192 static void nhi_remove(struct pci_dev *pdev)
1193 {
1194 struct tb *tb = pci_get_drvdata(pdev);
1195 struct tb_nhi *nhi = tb->nhi;
1196
1197 pm_runtime_get_sync(&pdev->dev);
1198 pm_runtime_dont_use_autosuspend(&pdev->dev);
1199 pm_runtime_forbid(&pdev->dev);
1200
1201 tb_domain_remove(tb);
1202 nhi_shutdown(nhi);
1203 }
1204
1205 /*
1206  * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable
1207  * the tunnels asap. A corresponding pci quirk blocks the downstream bridges
1208  * resume_noirq until we are done.
1209  */
1210 static const struct dev_pm_ops nhi_pm_ops = {
1211 .suspend_noirq = nhi_suspend_noirq,
1212 .resume_noirq = nhi_resume_noirq,
1213 .freeze_noirq = nhi_suspend_noirq,
1214 /*
1215  * During freeze we just disable hotplug, the PCI tunnels stay alive.
1216  */
1217 .thaw_noirq = nhi_resume_noirq,
1218 .restore_noirq = nhi_resume_noirq,
1219 .suspend = nhi_suspend,
1220 .freeze = nhi_suspend,
1221 .poweroff_noirq = nhi_poweroff_noirq,
1222 .poweroff = nhi_suspend,
1223 .complete = nhi_complete,
1224 .runtime_suspend = nhi_runtime_suspend,
1225 .runtime_resume = nhi_runtime_resume,
1226 };
1227
1228 static struct pci_device_id nhi_ids[] = {
1229 /*
1230  * We have to specify class, the TB bridges use the same device and
1231  * vendor (sub)id on gen 1 and gen 2 controllers.
1232  */
1233 {
1234 .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
1235 .vendor = PCI_VENDOR_ID_INTEL,
1236 .device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
1237 .subvendor = 0x2222, .subdevice = 0x1111,
1238 },
1239 {
1240 .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
1241 .vendor = PCI_VENDOR_ID_INTEL,
1242 .device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
1243 .subvendor = 0x2222, .subdevice = 0x1111,
1244 },
1245 {
1246 .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
1247 .vendor = PCI_VENDOR_ID_INTEL,
1248 .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
1249 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
1250 },
1251 {
1252 .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
1253 .vendor = PCI_VENDOR_ID_INTEL,
1254 .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
1255 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
1256 },
1257
1258 /* Thunderbolt 3 */
1259 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
1260 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
1261 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
1262 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
1263 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
1264 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
1265 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
1266 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
1267 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
1268 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
1269 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
1270 .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1271 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
1272 .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1273
1274 { 0,}
1275 };
1276
1277 MODULE_DEVICE_TABLE(pci, nhi_ids);
1278 MODULE_LICENSE("GPL");
1279
1280 static struct pci_driver nhi_driver = {
1281 .name = "thunderbolt",
1282 .id_table = nhi_ids,
1283 .probe = nhi_probe,
1284 .remove = nhi_remove,
1285 .driver.pm = &nhi_pm_ops,
1286 };
1287
1288 static int __init nhi_init(void)
1289 {
1290 int ret;
1291
1292 ret = tb_domain_init();
1293 if (ret)
1294 return ret;
1295 ret = pci_register_driver(&nhi_driver);
1296 if (ret)
1297 tb_domain_exit();
1298 return ret;
1299 }
1300
1301 static void __exit nhi_unload(void)
1302 {
1303 pci_unregister_driver(&nhi_driver);
1304 tb_domain_exit();
1305 }
1306
1307 rootfs_initcall(nhi_init);
1308 module_exit(nhi_unload);