This source file includes following definitions.
- mei_txe_reg_read
- mei_txe_reg_write
- mei_txe_sec_reg_read_silent
- mei_txe_sec_reg_read
- mei_txe_sec_reg_write_silent
- mei_txe_sec_reg_write
- mei_txe_br_reg_read
- mei_txe_br_reg_write
- mei_txe_aliveness_set
- mei_txe_aliveness_req_get
- mei_txe_aliveness_get
- mei_txe_aliveness_poll
- mei_txe_aliveness_wait
- mei_txe_aliveness_set_sync
- mei_txe_pg_in_transition
- mei_txe_pg_is_enabled
- mei_txe_pg_state
- mei_txe_input_ready_interrupt_enable
- mei_txe_input_doorbell_set
- mei_txe_output_ready_set
- mei_txe_is_input_ready
- mei_txe_intr_clear
- mei_txe_intr_disable
- mei_txe_intr_enable
- mei_txe_synchronize_irq
- mei_txe_pending_interrupts
- mei_txe_input_payload_write
- mei_txe_out_data_read
- mei_txe_readiness_set_host_rdy
- mei_txe_readiness_clear
- mei_txe_readiness_get
- mei_txe_readiness_is_sec_rdy
- mei_txe_hw_is_ready
- mei_txe_host_is_ready
- mei_txe_readiness_wait
- mei_txe_fw_status
- mei_txe_hw_config
- mei_txe_write
- mei_txe_hbuf_depth
- mei_txe_hbuf_empty_slots
- mei_txe_count_full_read_slots
- mei_txe_read_hdr
- mei_txe_read
- mei_txe_hw_reset
- mei_txe_hw_start
- mei_txe_check_and_ack_intrs
- mei_txe_irq_quick_handler
- mei_txe_irq_thread_handler
- mei_txe_dev_init
- mei_txe_setup_satt2
1
2
3
4
5
6
7 #include <linux/pci.h>
8 #include <linux/jiffies.h>
9 #include <linux/ktime.h>
10 #include <linux/delay.h>
11 #include <linux/kthread.h>
12 #include <linux/interrupt.h>
13 #include <linux/pm_runtime.h>
14
15 #include <linux/mei.h>
16
17 #include "mei_dev.h"
18 #include "hw-txe.h"
19 #include "client.h"
20 #include "hbm.h"
21
22 #include "mei-trace.h"
23
24 #define TXE_HBUF_DEPTH (PAYLOAD_SIZE / MEI_SLOT_SIZE)
25
26
27
28
29
30
31
32
33
34 static inline u32 mei_txe_reg_read(void __iomem *base_addr,
35 unsigned long offset)
36 {
37 return ioread32(base_addr + offset);
38 }
39
40
41
42
43
44
45
46
47 static inline void mei_txe_reg_write(void __iomem *base_addr,
48 unsigned long offset, u32 value)
49 {
50 iowrite32(value, base_addr + offset);
51 }
52
53
54
55
56
57
58
59
60
61
62
63 static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw,
64 unsigned long offset)
65 {
66 return mei_txe_reg_read(hw->mem_addr[SEC_BAR], offset);
67 }
68
69
70
71
72
73
74
75
76
77
78
79 static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw,
80 unsigned long offset)
81 {
82 WARN(!hw->aliveness, "sec read: aliveness not asserted\n");
83 return mei_txe_sec_reg_read_silent(hw, offset);
84 }
85
86
87
88
89
90
91
92
93
94
95 static inline void mei_txe_sec_reg_write_silent(struct mei_txe_hw *hw,
96 unsigned long offset, u32 value)
97 {
98 mei_txe_reg_write(hw->mem_addr[SEC_BAR], offset, value);
99 }
100
101
102
103
104
105
106
107
108
109
110 static inline void mei_txe_sec_reg_write(struct mei_txe_hw *hw,
111 unsigned long offset, u32 value)
112 {
113 WARN(!hw->aliveness, "sec write: aliveness not asserted\n");
114 mei_txe_sec_reg_write_silent(hw, offset, value);
115 }
116
117
118
119
120
121
122
123
124 static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw,
125 unsigned long offset)
126 {
127 return mei_txe_reg_read(hw->mem_addr[BRIDGE_BAR], offset);
128 }
129
130
131
132
133
134
135
136
137 static inline void mei_txe_br_reg_write(struct mei_txe_hw *hw,
138 unsigned long offset, u32 value)
139 {
140 mei_txe_reg_write(hw->mem_addr[BRIDGE_BAR], offset, value);
141 }
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157 static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
158 {
159
160 struct mei_txe_hw *hw = to_txe_hw(dev);
161 bool do_req = hw->aliveness != req;
162
163 dev_dbg(dev->dev, "Aliveness current=%d request=%d\n",
164 hw->aliveness, req);
165 if (do_req) {
166 dev->pg_event = MEI_PG_EVENT_WAIT;
167 mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req);
168 }
169 return do_req;
170 }
171
172
173
174
175
176
177
178
179
180
181
182
183 static u32 mei_txe_aliveness_req_get(struct mei_device *dev)
184 {
185 struct mei_txe_hw *hw = to_txe_hw(dev);
186 u32 reg;
187
188 reg = mei_txe_br_reg_read(hw, SICR_HOST_ALIVENESS_REQ_REG);
189 return reg & SICR_HOST_ALIVENESS_REQ_REQUESTED;
190 }
191
192
193
194
195
196
197
198
199
200 static u32 mei_txe_aliveness_get(struct mei_device *dev)
201 {
202 struct mei_txe_hw *hw = to_txe_hw(dev);
203 u32 reg;
204
205 reg = mei_txe_br_reg_read(hw, HICR_HOST_ALIVENESS_RESP_REG);
206 return reg & HICR_HOST_ALIVENESS_RESP_ACK;
207 }
208
209
210
211
212
213
214
215
216
217
218
/**
 * mei_txe_aliveness_poll - poll for the aliveness response to settle
 *	to the expected value
 *
 * @dev: the device structure
 * @expected: expected aliveness value
 *
 * Busy-polls (with short sleeps) for up to SEC_ALIVENESS_WAIT_TIMEOUT ms;
 * used when interrupts may be disabled. Caches the last read value in
 * hw->aliveness and resets pg_event to IDLE on both success and timeout.
 *
 * Return: 0 when the expected value was reached, -ETIME on timeout
 */
static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	ktime_t stop, start;

	start = ktime_get();
	stop = ktime_add(start, ms_to_ktime(SEC_ALIVENESS_WAIT_TIMEOUT));
	do {
		hw->aliveness = mei_txe_aliveness_get(dev);
		if (hw->aliveness == expected) {
			dev->pg_event = MEI_PG_EVENT_IDLE;
			dev_dbg(dev->dev, "aliveness settled after %lld usecs\n",
				ktime_to_us(ktime_sub(ktime_get(), start)));
			return 0;
		}
		usleep_range(20, 50);
	} while (ktime_before(ktime_get(), stop));

	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_err(dev->dev, "aliveness timed out\n");
	return -ETIME;
}
241
242
243
244
245
246
247
248
249
250
251
/**
 * mei_txe_aliveness_wait - wait for the aliveness response interrupt
 *
 * @dev: the device structure
 * @expected: expected aliveness value
 *
 * Sleeps on hw->wait_aliveness_resp until the ISR signals
 * MEI_PG_EVENT_RECEIVED or the timeout expires. device_lock is dropped
 * for the duration of the wait and reacquired afterwards; the aliveness
 * value is re-read under the lock before deciding success.
 *
 * Return: 0 when the expected value was reached, -ETIME otherwise
 */
static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	const unsigned long timeout =
			msecs_to_jiffies(SEC_ALIVENESS_WAIT_TIMEOUT);
	long err;
	int ret;

	hw->aliveness = mei_txe_aliveness_get(dev);
	if (hw->aliveness == expected)
		return 0;

	/* the interrupt thread takes device_lock, so release it while waiting */
	mutex_unlock(&dev->device_lock);
	err = wait_event_timeout(hw->wait_aliveness_resp,
			dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	hw->aliveness = mei_txe_aliveness_get(dev);
	ret = hw->aliveness == expected ? 0 : -ETIME;

	if (ret)
		dev_warn(dev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n",
			err, hw->aliveness, dev->pg_event);
	else
		dev_dbg(dev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n",
			jiffies_to_msecs(timeout - err),
			hw->aliveness, dev->pg_event);

	dev->pg_event = MEI_PG_EVENT_IDLE;
	return ret;
}
283
284
285
286
287
288
289
290
291
292 int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
293 {
294 if (mei_txe_aliveness_set(dev, req))
295 return mei_txe_aliveness_wait(dev, req);
296 return 0;
297 }
298
299
300
301
302
303
304
305
306 static bool mei_txe_pg_in_transition(struct mei_device *dev)
307 {
308 return dev->pg_event == MEI_PG_EVENT_WAIT;
309 }
310
311
312
313
314
315
316
317
/**
 * mei_txe_pg_is_enabled - check whether power gating is supported
 *
 * @dev: the device structure (unused; pg is always available on txe)
 *
 * Return: always true
 */
static bool mei_txe_pg_is_enabled(struct mei_device *dev)
{
	return true;
}
322
323
324
325
326
327
328
329
330
331 static inline enum mei_pg_state mei_txe_pg_state(struct mei_device *dev)
332 {
333 struct mei_txe_hw *hw = to_txe_hw(dev);
334
335 return hw->aliveness ? MEI_PG_OFF : MEI_PG_ON;
336 }
337
338
339
340
341
342
/**
 * mei_txe_input_ready_interrupt_enable - enable the input-ready interrupt
 *
 * @dev: the device structure
 *
 * Read-modify-write of the SeC host interrupt mask: sets the IN_RDY bit
 * while preserving all other mask bits.
 */
static void mei_txe_input_ready_interrupt_enable(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 hintmsk;

	hintmsk = mei_txe_sec_reg_read(hw, SEC_IPC_HOST_INT_MASK_REG);
	hintmsk |= SEC_IPC_HOST_INT_MASK_IN_RDY;
	mei_txe_sec_reg_write(hw, SEC_IPC_HOST_INT_MASK_REG, hintmsk);
}
352
353
354
355
356
357
358
/**
 * mei_txe_input_doorbell_set - ring the input doorbell
 *
 * @hw: the txe hardware structure
 *
 * Clears any stale IN_READY cause bit first, so a subsequent interrupt
 * unambiguously reflects the device consuming this doorbell.
 */
static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw)
{
	/* clear the cause bit before ringing the doorbell */
	clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause);
	mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_DOORBELL_REG, 1);
}
365
366
367
368
369
370
/**
 * mei_txe_output_ready_set - signal that the host consumed
 *	the output buffer
 *
 * @hw: the txe hardware structure
 */
static void mei_txe_output_ready_set(struct mei_txe_hw *hw)
{
	mei_txe_br_reg_write(hw,
			SICR_SEC_IPC_OUTPUT_STATUS_REG,
			SEC_IPC_OUTPUT_STATUS_RDY);
}
377
378
379
380
381
382
383
384
385 static bool mei_txe_is_input_ready(struct mei_device *dev)
386 {
387 struct mei_txe_hw *hw = to_txe_hw(dev);
388 u32 status;
389
390 status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG);
391 return !!(SEC_IPC_INPUT_STATUS_RDY & status);
392 }
393
394
395
396
397
398
/**
 * mei_txe_intr_clear - clear all pending interrupt causes
 *
 * @dev: the device structure
 *
 * Acks the SeC IPC status first (silently — aliveness may not be
 * asserted here), then the bridge HISR and HHISR status registers.
 */
static inline void mei_txe_intr_clear(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG,
		SEC_IPC_HOST_INT_STATUS_PENDING);
	mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_STS_MSK);
	mei_txe_br_reg_write(hw, HHISR_REG, IPC_HHIER_MSK);
}
408
409
410
411
412
413
/**
 * mei_txe_intr_disable - disable all device interrupts
 *
 * @dev: the device structure
 *
 * Zeroes both the hierarchical (HHIER) and the plain (HIER)
 * interrupt enable registers on the bridge.
 */
static void mei_txe_intr_disable(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_br_reg_write(hw, HHIER_REG, 0);
	mei_txe_br_reg_write(hw, HIER_REG, 0);
}
421
422
423
424
425
/**
 * mei_txe_intr_enable - enable all device interrupts
 *
 * @dev: the device structure
 *
 * Enables the hierarchical (HHIER) and the plain (HIER)
 * interrupt sources on the bridge.
 */
static void mei_txe_intr_enable(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_br_reg_write(hw, HHIER_REG, IPC_HHIER_MSK);
	mei_txe_br_reg_write(hw, HIER_REG, HIER_INT_EN_MSK);
}
433
434
435
436
437
438
439 static void mei_txe_synchronize_irq(struct mei_device *dev)
440 {
441 struct pci_dev *pdev = to_pci_dev(dev->dev);
442
443 synchronize_irq(pdev->irq);
444 }
445
446
447
448
449
450
451
452
453
454
455
456
457 static bool mei_txe_pending_interrupts(struct mei_device *dev)
458 {
459
460 struct mei_txe_hw *hw = to_txe_hw(dev);
461 bool ret = (hw->intr_cause & (TXE_INTR_READINESS |
462 TXE_INTR_ALIVENESS |
463 TXE_INTR_IN_READY |
464 TXE_INTR_OUT_DB));
465
466 if (ret) {
467 dev_dbg(dev->dev,
468 "Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n",
469 !!(hw->intr_cause & TXE_INTR_IN_READY),
470 !!(hw->intr_cause & TXE_INTR_READINESS),
471 !!(hw->intr_cause & TXE_INTR_ALIVENESS),
472 !!(hw->intr_cause & TXE_INTR_OUT_DB));
473 }
474 return ret;
475 }
476
477
478
479
480
481
482
483
484
485 static void mei_txe_input_payload_write(struct mei_device *dev,
486 unsigned long idx, u32 value)
487 {
488 struct mei_txe_hw *hw = to_txe_hw(dev);
489
490 mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_PAYLOAD_REG +
491 (idx * sizeof(u32)), value);
492 }
493
494
495
496
497
498
499
500
501
502
503 static u32 mei_txe_out_data_read(const struct mei_device *dev,
504 unsigned long idx)
505 {
506 struct mei_txe_hw *hw = to_txe_hw(dev);
507
508 return mei_txe_br_reg_read(hw,
509 BRIDGE_IPC_OUTPUT_PAYLOAD_REG + (idx * sizeof(u32)));
510 }
511
512
513
514
515
516
517
518
/**
 * mei_txe_readiness_set_host_rdy - announce host readiness to the device
 *
 * @dev: the device structure
 */
static void mei_txe_readiness_set_host_rdy(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_br_reg_write(hw,
		SICR_HOST_IPC_READINESS_REQ_REG,
		SICR_HOST_IPC_READINESS_HOST_RDY);
}
527
528
529
530
531
532
/**
 * mei_txe_readiness_clear - clear the host readiness indication
 *
 * @dev: the device structure
 */
static void mei_txe_readiness_clear(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_br_reg_write(hw, SICR_HOST_IPC_READINESS_REQ_REG,
		SICR_HOST_IPC_READINESS_RDY_CLR);
}
540
541
542
543
544
545
546
547
548 static u32 mei_txe_readiness_get(struct mei_device *dev)
549 {
550 struct mei_txe_hw *hw = to_txe_hw(dev);
551
552 return mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
553 }
554
555
556
557
558
559
560
561
562
563
564 static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness)
565 {
566 return !!(readiness & HICR_SEC_IPC_READINESS_SEC_RDY);
567 }
568
569
570
571
572
573
574
575
576 static bool mei_txe_hw_is_ready(struct mei_device *dev)
577 {
578 u32 readiness = mei_txe_readiness_get(dev);
579
580 return mei_txe_readiness_is_sec_rdy(readiness);
581 }
582
583
584
585
586
587
588
589
590 static inline bool mei_txe_host_is_ready(struct mei_device *dev)
591 {
592 struct mei_txe_hw *hw = to_txe_hw(dev);
593 u32 reg = mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
594
595 return !!(reg & HICR_SEC_IPC_READINESS_HOST_RDY);
596 }
597
598
599
600
601
602
603
604
/**
 * mei_txe_readiness_wait - wait for the hardware to become ready
 *
 * @dev: the device structure
 *
 * Sleeps on dev->wait_hw_ready (device_lock dropped for the wait) until
 * the interrupt handler sets dev->recvd_hw_ready or the timeout expires.
 * Consumes the recvd_hw_ready flag on success.
 *
 * Return: 0 on success, -ETIME on timeout
 */
static int mei_txe_readiness_wait(struct mei_device *dev)
{
	if (mei_txe_hw_is_ready(dev))
		return 0;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready,
			msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait for readiness failed\n");
		return -ETIME;
	}

	dev->recvd_hw_ready = false;
	return 0;
}
622
/* PCI config space offsets of the two TXE firmware status registers */
static const struct mei_fw_status mei_txe_fw_sts = {
	.count = 2,
	.status[0] = PCI_CFG_TXE_FW_STS0,
	.status[1] = PCI_CFG_TXE_FW_STS1
};
628
629
630
631
632
633
634
635
636
637 static int mei_txe_fw_status(struct mei_device *dev,
638 struct mei_fw_status *fw_status)
639 {
640 const struct mei_fw_status *fw_src = &mei_txe_fw_sts;
641 struct pci_dev *pdev = to_pci_dev(dev->dev);
642 int ret;
643 int i;
644
645 if (!fw_status)
646 return -EINVAL;
647
648 fw_status->count = fw_src->count;
649 for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
650 ret = pci_read_config_dword(pdev, fw_src->status[i],
651 &fw_status->status[i]);
652 trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
653 fw_src->status[i],
654 fw_status->status[i]);
655 if (ret)
656 return ret;
657 }
658
659 return 0;
660 }
661
662
663
664
665
666
667
668
669
/**
 * mei_txe_hw_config - sample the current hardware state
 *	into the cached aliveness and readiness fields
 *
 * @dev: the device structure
 */
static void mei_txe_hw_config(struct mei_device *dev)
{

	struct mei_txe_hw *hw = to_txe_hw(dev);

	hw->aliveness = mei_txe_aliveness_get(dev);
	hw->readiness = mei_txe_readiness_get(dev);

	dev_dbg(dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
		hw->aliveness, hw->readiness);
}
681
682
683
684
685
686
687
688
689
690
691
692
693 static int mei_txe_write(struct mei_device *dev,
694 const void *hdr, size_t hdr_len,
695 const void *data, size_t data_len)
696 {
697 struct mei_txe_hw *hw = to_txe_hw(dev);
698 unsigned long rem;
699 const u32 *reg_buf;
700 u32 slots = TXE_HBUF_DEPTH;
701 u32 dw_cnt;
702 unsigned long i, j;
703
704 if (WARN_ON(!hdr || !data || hdr_len & 0x3))
705 return -EINVAL;
706
707 dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
708
709 dw_cnt = mei_data2slots(hdr_len + data_len);
710 if (dw_cnt > slots)
711 return -EMSGSIZE;
712
713 if (WARN(!hw->aliveness, "txe write: aliveness not asserted\n"))
714 return -EAGAIN;
715
716
717 mei_txe_input_ready_interrupt_enable(dev);
718
719 if (!mei_txe_is_input_ready(dev)) {
720 char fw_sts_str[MEI_FW_STATUS_STR_SZ];
721
722 mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
723 dev_err(dev->dev, "Input is not ready %s\n", fw_sts_str);
724 return -EAGAIN;
725 }
726
727 reg_buf = hdr;
728 for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
729 mei_txe_input_payload_write(dev, i, reg_buf[i]);
730
731 reg_buf = data;
732 for (j = 0; j < data_len / MEI_SLOT_SIZE; j++)
733 mei_txe_input_payload_write(dev, i + j, reg_buf[j]);
734
735 rem = data_len & 0x3;
736 if (rem > 0) {
737 u32 reg = 0;
738
739 memcpy(®, (const u8 *)data + data_len - rem, rem);
740 mei_txe_input_payload_write(dev, i + j, reg);
741 }
742
743
744 hw->slots = 0;
745
746
747 mei_txe_input_doorbell_set(hw);
748
749 return 0;
750 }
751
752
753
754
755
756
757
758
/**
 * mei_txe_hbuf_depth - report the host buffer depth in slots
 *
 * @dev: the device structure (unused; the depth is fixed)
 *
 * Return: TXE_HBUF_DEPTH
 */
static u32 mei_txe_hbuf_depth(const struct mei_device *dev)
{
	return TXE_HBUF_DEPTH;
}
763
764
765
766
767
768
769
770
771 static int mei_txe_hbuf_empty_slots(struct mei_device *dev)
772 {
773 struct mei_txe_hw *hw = to_txe_hw(dev);
774
775 return hw->slots;
776 }
777
778
779
780
781
782
783
784
/**
 * mei_txe_count_full_read_slots - report the number of readable slots
 *
 * @dev: the device structure (unused)
 *
 * Return: always the full buffer depth — the read buffer is always
 * treated as completely filled
 */
static int mei_txe_count_full_read_slots(struct mei_device *dev)
{

	return TXE_HBUF_DEPTH;
}
790
791
792
793
794
795
796
797
798
/**
 * mei_txe_read_hdr - read the message header, i.e. dword 0
 *	of the output payload area
 *
 * @dev: the device structure
 *
 * Return: the header dword
 */
static u32 mei_txe_read_hdr(const struct mei_device *dev)
{
	return mei_txe_out_data_read(dev, 0);
}
803
804
805
806
807
808
809
810
811
812 static int mei_txe_read(struct mei_device *dev,
813 unsigned char *buf, unsigned long len)
814 {
815
816 struct mei_txe_hw *hw = to_txe_hw(dev);
817 u32 *reg_buf, reg;
818 u32 rem;
819 u32 i;
820
821 if (WARN_ON(!buf || !len))
822 return -EINVAL;
823
824 reg_buf = (u32 *)buf;
825 rem = len & 0x3;
826
827 dev_dbg(dev->dev, "buffer-length = %lu buf[0]0x%08X\n",
828 len, mei_txe_out_data_read(dev, 0));
829
830 for (i = 0; i < len / MEI_SLOT_SIZE; i++) {
831
832 reg = mei_txe_out_data_read(dev, i + 1);
833 dev_dbg(dev->dev, "buf[%d] = 0x%08X\n", i, reg);
834 *reg_buf++ = reg;
835 }
836
837 if (rem) {
838 reg = mei_txe_out_data_read(dev, i + 1);
839 memcpy(reg_buf, ®, rem);
840 }
841
842 mei_txe_output_ready_set(hw);
843 return 0;
844 }
845
846
847
848
849
850
851
852
853
/**
 * mei_txe_hw_reset - reset the txe hardware into a known state
 *
 * @dev: the device structure
 * @intr_enable: unused here; interrupts are re-enabled in hw_start
 *
 * Settles aliveness (polling, since interrupts get disabled below),
 * drops aliveness to 0 if it was requested, and clears readiness.
 *
 * Return: 0 on success, -EIO if aliveness fails to settle
 */
static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	u32 aliveness_req;

	/*
	 * read the input doorbell; the value is discarded — presumably a
	 * synchronizing read before reset (TODO: confirm against HW spec)
	 */
	(void)mei_txe_sec_reg_read_silent(hw, SEC_IPC_INPUT_DOORBELL_REG);

	aliveness_req = mei_txe_aliveness_req_get(dev);
	hw->aliveness = mei_txe_aliveness_get(dev);

	/* disable interrupts; from here on aliveness must be polled */
	mei_txe_intr_disable(dev);

	/*
	 * if request and response disagree, poll until the response
	 * catches up with the outstanding request
	 */
	if (aliveness_req != hw->aliveness)
		if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) {
			dev_err(dev->dev, "wait for aliveness settle failed ... bailing out\n");
			return -EIO;
		}

	/* if aliveness was on, turn it off and poll for the ack */
	if (aliveness_req) {
		mei_txe_aliveness_set(dev, 0);
		if (mei_txe_aliveness_poll(dev, 0) < 0) {
			dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
			return -EIO;
		}
	}

	/* clear the host readiness indication */
	mei_txe_readiness_clear(dev);

	return 0;
}
900
901
902
903
904
905
906
907
/**
 * mei_txe_hw_start - bring the hardware up after reset
 *
 * @dev: the device structure
 *
 * Enables interrupts, waits for SeC readiness, raises aliveness
 * synchronously, and finally announces host readiness.
 *
 * Return: 0 on success, < 0 on readiness or aliveness timeout
 */
static int mei_txe_hw_start(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	int ret;

	u32 hisr;

	/* interrupts must be on before waiting for readiness */
	mei_txe_intr_enable(dev);

	ret = mei_txe_readiness_wait(dev);
	if (ret < 0) {
		dev_err(dev->dev, "waiting for readiness failed\n");
		return ret;
	}

	/* ack a possibly stale INT_2 status bit left over from reset */
	hisr = mei_txe_br_reg_read(hw, HISR_REG);
	if (hisr & HISR_INT_2_STS)
		mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_2_STS);

	/* drop any stale output-doorbell cause bit */
	clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause);

	ret = mei_txe_aliveness_set_sync(dev, 1);
	if (ret < 0) {
		dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
		return ret;
	}

	pm_runtime_set_active(dev->dev);

	/* arm the input-ready interrupt for the first write */
	mei_txe_input_ready_interrupt_enable(dev);

	/* tell the device the output buffer may be (re)used */
	mei_txe_output_ready_set(hw);

	/* announce host readiness last, after everything is armed */
	mei_txe_readiness_set_host_rdy(dev);

	return 0;
}
957
958
959
960
961
962
963
964
965
966
/**
 * mei_txe_check_and_ack_intrs - check whether the device generated
 *	an interrupt and optionally acknowledge it
 *
 * @dev: the device structure
 * @do_ack: acknowledge (and latch into hw->intr_cause) if true
 *
 * Reads the hierarchical interrupt status; the SeC IPC status is read
 * only when the SeC source fired AND aliveness is asserted (the SeC
 * registers are unreliable otherwise). On ack: latches the causes,
 * disables interrupts (the thread handler re-enables them), and acks
 * SeC status, HISR, and HHISR in that order.
 *
 * Return: true if an interrupt was generated by this device
 */
static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 hisr;
	u32 hhisr;
	u32 ipc_isr;
	u32 aliveness;
	bool generated;

	/* top-level: did any source in the hierarchy fire? */
	hhisr = mei_txe_br_reg_read(hw, HHISR_REG);
	generated = (hhisr & IPC_HHIER_MSK);
	if (!generated)
		goto out;

	hisr = mei_txe_br_reg_read(hw, HISR_REG);

	aliveness = mei_txe_aliveness_get(dev);
	if (hhisr & IPC_HHIER_SEC && aliveness) {
		ipc_isr = mei_txe_sec_reg_read_silent(hw,
				SEC_IPC_HOST_INT_STATUS_REG);
	} else {
		ipc_isr = 0;
		hhisr &= ~IPC_HHIER_SEC;
	}

	generated = generated ||
		(hisr & HISR_INT_STS_MSK) ||
		(ipc_isr & SEC_IPC_HOST_INT_STATUS_PENDING);

	if (generated && do_ack) {
		/* latch causes for the thread handler */
		hw->intr_cause |= hisr & HISR_INT_STS_MSK;
		if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY)
			hw->intr_cause |= TXE_INTR_IN_READY;

		/* mask further interrupts until the thread handler runs */
		mei_txe_intr_disable(dev);

		/* ack: SeC status first, then bridge HISR/HHISR */
		mei_txe_sec_reg_write_silent(hw,
			SEC_IPC_HOST_INT_STATUS_REG, ipc_isr);
		mei_txe_br_reg_write(hw, HISR_REG, hisr);
		mei_txe_br_reg_write(hw, HHISR_REG, hhisr);
	}

out:
	return generated;
}
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026 irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id)
1027 {
1028 struct mei_device *dev = dev_id;
1029
1030 if (mei_txe_check_and_ack_intrs(dev, true))
1031 return IRQ_WAKE_THREAD;
1032 return IRQ_NONE;
1033 }
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
/**
 * mei_txe_irq_thread_handler - threaded interrupt handler
 *
 * @irq: irq number
 * @dev_id: the mei device as opaque cookie
 *
 * Services the latched interrupt causes under device_lock, in this
 * order: readiness, aliveness, output doorbell (read), input ready
 * (write), completion. Interrupts (disabled by the quick handler) are
 * re-enabled on exit.
 *
 * Return: IRQ_HANDLED
 */
irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_txe_hw *hw = to_txe_hw(dev);
	struct list_head cmpl_list;
	s32 slots;
	int rets = 0;

	dev_dbg(dev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n",
		mei_txe_br_reg_read(hw, HHISR_REG),
		mei_txe_br_reg_read(hw, HISR_REG),
		mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG));

	/* serialize against the rest of the driver */
	mutex_lock(&dev->device_lock);
	INIT_LIST_HEAD(&cmpl_list);

	/* with MSI the quick handler may not have run; ack here */
	if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
		mei_txe_check_and_ack_intrs(dev, true);

	/* log pending causes (debug only) */
	mei_txe_pending_interrupts(dev);

	hw->aliveness = mei_txe_aliveness_get(dev);
	hw->readiness = mei_txe_readiness_get(dev);

	/*
	 * readiness change: either the firmware came up (signal the
	 * waiter) or it went down unexpectedly (schedule a reset)
	 */
	if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) {
		dev_dbg(dev->dev, "Readiness Interrupt was received...\n");

		/* check if SeC is going through reset */
		if (mei_txe_readiness_is_sec_rdy(hw->readiness)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
		} else {
			dev->recvd_hw_ready = false;
			if (dev->dev_state != MEI_DEV_RESETTING) {

				dev_warn(dev->dev, "FW not ready: resetting.\n");
				schedule_work(&dev->reset_work);
				goto end;

			}
		}
		wake_up(&dev->wait_hw_ready);
	}

	/* aliveness response: wake anyone blocked in aliveness_wait */
	if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) {
		/* Clear the interrupt cause */
		dev_dbg(dev->dev,
			"Aliveness Interrupt: Status: %d\n", hw->aliveness);
		dev->pg_event = MEI_PG_EVENT_RECEIVED;
		if (waitqueue_active(&hw->wait_aliveness_resp))
			wake_up(&hw->wait_aliveness_resp);
	}

	/* output doorbell: the device placed a message for us to read */
	slots = mei_count_full_read_slots(dev);
	if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {
		/* read from TXE */
		rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
		if (rets &&
		    (dev->dev_state != MEI_DEV_RESETTING &&
		     dev->dev_state != MEI_DEV_POWER_DOWN)) {
			dev_err(dev->dev,
				"mei_irq_read_handler ret = %d.\n", rets);

			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	/* input ready: the whole host buffer is free again */
	if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) {
		dev->hbuf_is_ready = true;
		hw->slots = TXE_HBUF_DEPTH;
	}

	/* writes require both aliveness and a free host buffer */
	if (hw->aliveness && dev->hbuf_is_ready) {
		/* get the real register value */
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
		rets = mei_irq_write_handler(dev, &cmpl_list);
		if (rets && rets != -EMSGSIZE)
			dev_err(dev->dev, "mei_irq_write_handler ret = %d.\n",
				rets);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &cmpl_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);

	mutex_unlock(&dev->device_lock);

	/* re-enable interrupts disabled by the quick handler */
	mei_enable_interrupts(dev);
	return IRQ_HANDLED;
}
1154
/* TXE-specific implementation of the mei hardware operations */
static const struct mei_hw_ops mei_txe_hw_ops = {

	.host_is_ready = mei_txe_host_is_ready,

	.fw_status = mei_txe_fw_status,
	.pg_state = mei_txe_pg_state,

	.hw_is_ready = mei_txe_hw_is_ready,
	.hw_reset = mei_txe_hw_reset,
	.hw_config = mei_txe_hw_config,
	.hw_start = mei_txe_hw_start,

	.pg_in_transition = mei_txe_pg_in_transition,
	.pg_is_enabled = mei_txe_pg_is_enabled,

	.intr_clear = mei_txe_intr_clear,
	.intr_enable = mei_txe_intr_enable,
	.intr_disable = mei_txe_intr_disable,
	.synchronize_irq = mei_txe_synchronize_irq,

	.hbuf_free_slots = mei_txe_hbuf_empty_slots,
	.hbuf_is_ready = mei_txe_is_input_ready,
	.hbuf_depth = mei_txe_hbuf_depth,

	.write = mei_txe_write,

	.rdbuf_full_slots = mei_txe_count_full_read_slots,
	.read_hdr = mei_txe_read_hdr,

	.read = mei_txe_read,

};
1187
1188
1189
1190
1191
1192
1193
1194
/**
 * mei_txe_dev_init - allocate and initialize the txe-specific
 *	mei device structure
 *
 * @pdev: the pci device
 *
 * Allocation is device-managed (devm), so no explicit free is needed.
 *
 * Return: the new mei device, or NULL on allocation failure
 */
struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;

	/* the txe hw struct is allocated inline, right after mei_device */
	dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
			   sizeof(struct mei_txe_hw), GFP_KERNEL);
	if (!dev)
		return NULL;

	mei_device_init(dev, &pdev->dev, &mei_txe_hw_ops);

	hw = to_txe_hw(dev);

	init_waitqueue_head(&hw->wait_aliveness_resp);

	return dev;
}
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
/**
 * mei_txe_setup_satt2 - program the SATT2 translation window
 *
 * @dev: the device structure
 * @addr: physical base address of the window
 * @range: window size in bytes
 *
 * Validates the address and range constraints, then programs size,
 * low address bits, and the control register (which carries the
 * valid bit plus the high address bits).
 *
 * Return: 0 on success, -EINVAL on constraint violation
 */
int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	u32 lo32 = lower_32_bits(addr);
	u32 hi32 = upper_32_bits(addr);
	u32 ctrl;

	/* at most 36-bit addresses: only 4 high bits fit in ctrl */
	if (hi32 & ~0xF)
		return -EINVAL;

	/* base address must be 16-byte aligned */
	if (lo32 & 0xF)
		return -EINVAL;

	/* range bit 2 must be clear (hardware constraint) */
	if (range & 0x4)
		return -EINVAL;

	/* range cannot exceed the maximum window size */
	if (range > SATT_RANGE_MAX)
		return -EINVAL;

	ctrl = SATT2_CTRL_VALID_MSK;
	ctrl |= hi32 << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT;

	mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range);
	mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32);
	mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl);
	dev_dbg(dev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n",
		range, lo32, ctrl);

	return 0;
}
1257 }