This source file includes the following definitions:
- ti_sci_debug_show
- ti_sci_debugfs_create
- ti_sci_debugfs_destroy
- ti_sci_debugfs_create
- ti_sci_debugfs_destroy
- ti_sci_dump_header_dbg
- ti_sci_rx_callback
- ti_sci_get_one_xfer
- ti_sci_put_one_xfer
- ti_sci_do_xfer
- ti_sci_cmd_get_revision
- ti_sci_is_response_ack
- ti_sci_set_device_state
- ti_sci_get_device_state
- ti_sci_cmd_get_device
- ti_sci_cmd_get_device_exclusive
- ti_sci_cmd_idle_device
- ti_sci_cmd_idle_device_exclusive
- ti_sci_cmd_put_device
- ti_sci_cmd_dev_is_valid
- ti_sci_cmd_dev_get_clcnt
- ti_sci_cmd_dev_is_idle
- ti_sci_cmd_dev_is_stop
- ti_sci_cmd_dev_is_on
- ti_sci_cmd_dev_is_trans
- ti_sci_cmd_set_device_resets
- ti_sci_cmd_get_device_resets
- ti_sci_set_clock_state
- ti_sci_cmd_get_clock_state
- ti_sci_cmd_get_clock
- ti_sci_cmd_idle_clock
- ti_sci_cmd_put_clock
- ti_sci_cmd_clk_is_auto
- ti_sci_cmd_clk_is_on
- ti_sci_cmd_clk_is_off
- ti_sci_cmd_clk_set_parent
- ti_sci_cmd_clk_get_parent
- ti_sci_cmd_clk_get_num_parents
- ti_sci_cmd_clk_get_match_freq
- ti_sci_cmd_clk_set_freq
- ti_sci_cmd_clk_get_freq
- ti_sci_cmd_core_reboot
- ti_sci_get_resource_type
- ti_sci_get_resource_range
- ti_sci_cmd_get_resource_range
- ti_sci_cmd_get_resource_range_from_shost
- ti_sci_manage_irq
- ti_sci_set_irq
- ti_sci_free_irq
- ti_sci_cmd_set_irq
- ti_sci_cmd_set_event_map
- ti_sci_cmd_free_irq
- ti_sci_cmd_free_event_map
- ti_sci_cmd_ring_config
- ti_sci_cmd_ring_get_config
- ti_sci_cmd_rm_psil_pair
- ti_sci_cmd_rm_psil_unpair
- ti_sci_cmd_rm_udmap_tx_ch_cfg
- ti_sci_cmd_rm_udmap_rx_ch_cfg
- ti_sci_cmd_rm_udmap_rx_flow_cfg
- ti_sci_cmd_proc_request
- ti_sci_cmd_proc_release
- ti_sci_cmd_proc_handover
- ti_sci_cmd_proc_set_config
- ti_sci_cmd_proc_set_control
- ti_sci_cmd_proc_get_status
- ti_sci_setup_ops
- ti_sci_get_handle
- ti_sci_put_handle
- devm_ti_sci_release
- devm_ti_sci_get_handle
- ti_sci_get_by_phandle
- devm_ti_sci_get_by_phandle
- ti_sci_get_free_resource
- ti_sci_release_resource
- ti_sci_get_num_resources
- devm_ti_sci_get_of_resource
- tisci_reboot_handler
- ti_sci_probe
- ti_sci_remove
1
2
3
4
5
6
7
8
9 #define pr_fmt(fmt) "%s: " fmt, __func__
10
11 #include <linux/bitmap.h>
12 #include <linux/debugfs.h>
13 #include <linux/export.h>
14 #include <linux/io.h>
15 #include <linux/kernel.h>
16 #include <linux/mailbox_client.h>
17 #include <linux/module.h>
18 #include <linux/of_device.h>
19 #include <linux/semaphore.h>
20 #include <linux/slab.h>
21 #include <linux/soc/ti/ti-msgmgr.h>
22 #include <linux/soc/ti/ti_sci_protocol.h>
23 #include <linux/reboot.h>
24
25 #include "ti_sci.h"
26
27
28 static LIST_HEAD(ti_sci_list);
29
30 static DEFINE_MUTEX(ti_sci_list_mutex);
31
32
33
34
35
36
37
38
39
40
41
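/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 * @xfer_buf:	Preallocated buffer for the receive message (the
 *		request-ACK protocol lets the same buffer be reused
 *		for both directions)
 * @done:	completion event signalled when the response arrives
 */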
42 struct ti_sci_xfer {
43 struct ti_msgmgr_message tx_message;
44 u8 rx_len;
45 u8 *xfer_buf;
46 struct completion done;
47 };
48
49
50
51
52
53
54
55
56
57
58
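/**
 * struct ti_sci_xfers_info - Structure to manage transfer information
 * @sem_xfer_count:	Counting semaphore bounding the messages in flight
 * @xfer_block:		Preallocated message pool
 * @xfer_alloc_table:	Bitmap of allocated messages; the bit index doubles
 *			as the message sequence identifier
 * @xfer_lock:		Protection for message allocation
 */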
59 struct ti_sci_xfers_info {
60 struct semaphore sem_xfer_count;
61 struct ti_sci_xfer *xfer_block;
62 unsigned long *xfer_alloc_table;
63
64 spinlock_t xfer_lock;
65 };
66
67
68
69
70
71
72
73
74
75
76
77
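/**
 * struct ti_sci_rm_type_map - Resource management type translation entry
 * @dev_id:	TISCI device ID to match against
 * @type:	Resource management type corresponding to @dev_id
 */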
78 struct ti_sci_rm_type_map {
79 u32 dev_id;
80 u16 type;
81 };
82
83
84
85
86
87
88
89
90
91
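/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled
 * @rm_type_map:	Resource management type mapping table (may be NULL)
 */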
92 struct ti_sci_desc {
93 u8 default_host_id;
94 int max_rx_timeout_ms;
95 int max_msgs;
96 int max_msg_size;
97 struct ti_sci_rm_type_map *rm_type_map;
98 };
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
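/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @nb:		Reboot notifier block
 * @desc:	SoC description for this instance
 * @d:		Debugfs file entry
 * @debug_region:	Memory region where the debug messages are available
 * @debug_buffer:	Buffer allocated to copy debug messages
 * @debug_region_size:	Debug region size
 * @handle:	Instance of TI SCI handle to send to clients
 * @cl:		Mailbox client
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @minfo:	Message transfer info
 * @node:	List head for the global ti_sci_list
 * @host_id:	Host ID used in outgoing message headers
 * @users:	Number of users of this instance
 */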
118 struct ti_sci_info {
119 struct device *dev;
120 struct notifier_block nb;
121 const struct ti_sci_desc *desc;
122 struct dentry *d;
123 void __iomem *debug_region;
124 char *debug_buffer;
125 size_t debug_region_size;
126 struct ti_sci_handle handle;
127 struct mbox_client cl;
128 struct mbox_chan *chan_tx;
129 struct mbox_chan *chan_rx;
130 struct ti_sci_xfers_info minfo;
131 struct list_head node;
132 u8 host_id;
133
134 int users;
135
136 };
137
138 #define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl)
139 #define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
140 #define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
141
142 #ifdef CONFIG_DEBUG_FS
143
144
145
146
147
148
149
150
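/**
 * ti_sci_debug_show() - Helper to dump the debug log region
 * @s:		seq_file pointer
 * @unused:	unused
 *
 * Return: 0
 */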
151 static int ti_sci_debug_show(struct seq_file *s, void *unused)
152 {
153 struct ti_sci_info *info = s->private;
154
155 memcpy_fromio(info->debug_buffer, info->debug_region,
156 info->debug_region_size);
157
158
159
160
161
162
163 seq_puts(s, info->debug_buffer);
164 return 0;
165 }
166
167
168 DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);
169
170
171
172
173
174
175
176
177 static int ti_sci_debugfs_create(struct platform_device *pdev,
178 struct ti_sci_info *info)
179 {
180 struct device *dev = &pdev->dev;
181 struct resource *res;
182 char debug_name[50] = "ti_sci_debug@";
183
184
185 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
186 "debug_messages");
187 info->debug_region = devm_ioremap_resource(dev, res);
188 if (IS_ERR(info->debug_region))
189 return 0;
190 info->debug_region_size = resource_size(res);
191
192 info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
193 sizeof(char), GFP_KERNEL);
194 if (!info->debug_buffer)
195 return -ENOMEM;
196
197 info->debug_buffer[info->debug_region_size] = 0;
198
199 info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
200 sizeof(debug_name) -
201 sizeof("ti_sci_debug@")),
202 0444, NULL, info, &ti_sci_debug_fops);
203 if (IS_ERR(info->d))
204 return PTR_ERR(info->d);
205
206 dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
207 info->debug_region, info->debug_region_size, res);
208 return 0;
209 }
210
211
212
213
214
215
216 static void ti_sci_debugfs_destroy(struct platform_device *pdev,
217 struct ti_sci_info *info)
218 {
219 if (IS_ERR(info->debug_region))
220 return;
221
222 debugfs_remove(info->d);
223 }
224 #else
225 static inline int ti_sci_debugfs_create(struct platform_device *dev,
226 struct ti_sci_info *info)
227 {
228 return 0;
229 }
230
231 static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
232 struct ti_sci_info *info)
233 {
234 }
235 #endif
236
237
238
239
240
241
242 static inline void ti_sci_dump_header_dbg(struct device *dev,
243 struct ti_sci_msg_hdr *hdr)
244 {
245 dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
246 hdr->type, hdr->host, hdr->seq, hdr->flags);
247 }
248
249
250
251
252
253
254
255
256
257
258
259
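/**
 * ti_sci_rx_callback() - mailbox client callback for receive messages
 * @cl:	client pointer
 * @m:	mailbox message
 *
 * Called from the mailbox receive path: validates the sequence ID and
 * message length, copies the response into the preallocated transfer
 * buffer and completes the waiting transfer.
 */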
260 static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
261 {
262 struct ti_sci_info *info = cl_to_ti_sci_info(cl);
263 struct device *dev = info->dev;
264 struct ti_sci_xfers_info *minfo = &info->minfo;
265 struct ti_msgmgr_message *mbox_msg = m;
266 struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
267 struct ti_sci_xfer *xfer;
268 u8 xfer_id;
269
270 xfer_id = hdr->seq;
271
272
273
274
275
276 if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
277 dev_err(dev, "Message for %d is not expected!\n", xfer_id);
278 return;
279 }
280
281 xfer = &minfo->xfer_block[xfer_id];
282
283
284 if (mbox_msg->len > info->desc->max_msg_size) {
285 dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
286 mbox_msg->len, info->desc->max_msg_size);
287 ti_sci_dump_header_dbg(dev, hdr);
288 return;
289 }
290 if (mbox_msg->len < xfer->rx_len) {
291 dev_err(dev, "Recv xfer %zu < expected %d length\n",
292 mbox_msg->len, xfer->rx_len);
293 ti_sci_dump_header_dbg(dev, hdr);
294 return;
295 }
296
297 ti_sci_dump_header_dbg(dev, hdr);
298
299 memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
300 complete(&xfer->done);
301 }
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
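/**
 * ti_sci_get_one_xfer() - Allocate one message from the pool
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flags to set for the message
 * @tx_message_size:	transmit message size
 * @rx_message_size:	receive message size
 *
 * Helper used by the command functions to reserve a message slot,
 * initialize the message header and assign a sequence ID. May sleep
 * while waiting for a free slot (bounded by the semaphore timeout).
 *
 * Return: valid &struct ti_sci_xfer pointer on success, else an ERR_PTR.
 */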
320 static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
321 u16 msg_type, u32 msg_flags,
322 size_t tx_message_size,
323 size_t rx_message_size)
324 {
325 struct ti_sci_xfers_info *minfo = &info->minfo;
326 struct ti_sci_xfer *xfer;
327 struct ti_sci_msg_hdr *hdr;
328 unsigned long flags;
329 unsigned long bit_pos;
330 u8 xfer_id;
331 int ret;
332 int timeout;
333
334
335 if (rx_message_size > info->desc->max_msg_size ||
336 tx_message_size > info->desc->max_msg_size ||
337 rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
338 return ERR_PTR(-ERANGE);
339
340
341
342
343
344
345 timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
346 ret = down_timeout(&minfo->sem_xfer_count, timeout);
347 if (ret < 0)
348 return ERR_PTR(ret);
349
350
351 spin_lock_irqsave(&minfo->xfer_lock, flags);
352 bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
353 info->desc->max_msgs);
354 set_bit(bit_pos, minfo->xfer_alloc_table);
355 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
356
357
358
359
360
361
362
363
364 xfer_id = (u8)bit_pos;
365
366 xfer = &minfo->xfer_block[xfer_id];
367
368 hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
369 xfer->tx_message.len = tx_message_size;
370 xfer->rx_len = (u8)rx_message_size;
371
372 reinit_completion(&xfer->done);
373
374 hdr->seq = xfer_id;
375 hdr->type = msg_type;
376 hdr->host = info->host_id;
377 hdr->flags = msg_flags;
378
379 return xfer;
380 }
381
382
383
384
385
386
387
388
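/**
 * ti_sci_put_one_xfer() - Release a message back to the pool
 * @minfo:	transfer info pointer
 * @xfer:	message that was reserved by ti_sci_get_one_xfer()
 *
 * Clears the allocation bit for the sequence ID and releases the
 * semaphore count, holding the xfer spinlock while doing so.
 */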
389 static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
390 struct ti_sci_xfer *xfer)
391 {
392 unsigned long flags;
393 struct ti_sci_msg_hdr *hdr;
394 u8 xfer_id;
395
396 hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
397 xfer_id = hdr->seq;
398
399
400
401
402
403
404 spin_lock_irqsave(&minfo->xfer_lock, flags);
405 clear_bit(xfer_id, minfo->xfer_alloc_table);
406 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
407
408
409 up(&minfo->sem_xfer_count);
410 }
411
412
413
414
415
416
417
418
419
420
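/**
 * ti_sci_do_xfer() - Perform one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait on for response
 *
 * Return: 0 if the response was received, -ETIMEDOUT if no response
 * arrived within the timeout, or the transmit error code.
 */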
421 static inline int ti_sci_do_xfer(struct ti_sci_info *info,
422 struct ti_sci_xfer *xfer)
423 {
424 int ret;
425 int timeout;
426 struct device *dev = info->dev;
427
428 ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
429 if (ret < 0)
430 return ret;
431
432 ret = 0;
433
434
435 timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
436 if (!wait_for_completion_timeout(&xfer->done, timeout)) {
437 dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
438 (void *)_RET_IP_);
439 ret = -ETIMEDOUT;
440 }
441
442
443
444
445
446
447 mbox_client_txdone(info->chan_tx, ret);
448
449 return ret;
450 }
451
452
453
454
455
456
457
458
459
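/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @info:	Pointer to SCI entity information
 *
 * Updates the SCI version information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */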
460 static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
461 {
462 struct device *dev = info->dev;
463 struct ti_sci_handle *handle = &info->handle;
464 struct ti_sci_version_info *ver = &handle->version;
465 struct ti_sci_msg_resp_version *rev_info;
466 struct ti_sci_xfer *xfer;
467 int ret;
468
469 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
470 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
471 sizeof(struct ti_sci_msg_hdr),
472 sizeof(*rev_info));
473 if (IS_ERR(xfer)) {
474 ret = PTR_ERR(xfer);
475 dev_err(dev, "Message alloc failed(%d)\n", ret);
476 return ret;
477 }
478
479 rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;
480
481 ret = ti_sci_do_xfer(info, xfer);
482 if (ret) {
483 dev_err(dev, "Mbox send fail %d\n", ret);
484 goto fail;
485 }
486
487 ver->abi_major = rev_info->abi_major;
488 ver->abi_minor = rev_info->abi_minor;
489 ver->firmware_revision = rev_info->firmware_revision;
490 strncpy(ver->firmware_description, rev_info->firmware_description,
491 sizeof(ver->firmware_description));
492
493 fail:
494 ti_sci_put_one_xfer(&info->minfo, xfer);
495 return ret;
496 }
497
498
499
500
501
502
503
504 static inline bool ti_sci_is_response_ack(void *r)
505 {
506 struct ti_sci_msg_hdr *hdr = r;
507
508 return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
509 }
510
511
512
513
514
515
516
517
518
519
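/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to set up for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */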
520 static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
521 u32 id, u32 flags, u8 state)
522 {
523 struct ti_sci_info *info;
524 struct ti_sci_msg_req_set_device_state *req;
525 struct ti_sci_msg_hdr *resp;
526 struct ti_sci_xfer *xfer;
527 struct device *dev;
528 int ret = 0;
529
530 if (IS_ERR(handle))
531 return PTR_ERR(handle);
532 if (!handle)
533 return -EINVAL;
534
535 info = handle_to_ti_sci_info(handle);
536 dev = info->dev;
537
538 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
539 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
540 sizeof(*req), sizeof(*resp));
541 if (IS_ERR(xfer)) {
542 ret = PTR_ERR(xfer);
543 dev_err(dev, "Message alloc failed(%d)\n", ret);
544 return ret;
545 }
546 req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
547 req->id = id;
548 req->state = state;
549
550 ret = ti_sci_do_xfer(info, xfer);
551 if (ret) {
552 dev_err(dev, "Mbox send fail %d\n", ret);
553 goto fail;
554 }
555
556 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
557
558 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
559
560 fail:
561 ti_sci_put_one_xfer(&info->minfo, xfer);
562
563 return ret;
564 }
565
566
567
568
569
570
571
572
573
574
575
576
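/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @clcnt:	pointer to Context Loss Count (optional)
 * @resets:	pointer to resets (optional)
 * @p_state:	pointer to programmed state (optional)
 * @c_state:	pointer to current state (optional)
 *
 * Return: 0 if all went fine, else return appropriate error.
 */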
577 static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
578 u32 id, u32 *clcnt, u32 *resets,
579 u8 *p_state, u8 *c_state)
580 {
581 struct ti_sci_info *info;
582 struct ti_sci_msg_req_get_device_state *req;
583 struct ti_sci_msg_resp_get_device_state *resp;
584 struct ti_sci_xfer *xfer;
585 struct device *dev;
586 int ret = 0;
587
588 if (IS_ERR(handle))
589 return PTR_ERR(handle);
590 if (!handle)
591 return -EINVAL;
592
593 if (!clcnt && !resets && !p_state && !c_state)
594 return -EINVAL;
595
596 info = handle_to_ti_sci_info(handle);
597 dev = info->dev;
598
599 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
600 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
601 sizeof(*req), sizeof(*resp));
602 if (IS_ERR(xfer)) {
603 ret = PTR_ERR(xfer);
604 dev_err(dev, "Message alloc failed(%d)\n", ret);
605 return ret;
606 }
607 req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
608 req->id = id;
609
610 ret = ti_sci_do_xfer(info, xfer);
611 if (ret) {
612 dev_err(dev, "Mbox send fail %d\n", ret);
613 goto fail;
614 }
615
616 resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
617 if (!ti_sci_is_response_ack(resp)) {
618 ret = -ENODEV;
619 goto fail;
620 }
621
622 if (clcnt)
623 *clcnt = resp->context_loss_count;
624 if (resets)
625 *resets = resp->resets;
626 if (p_state)
627 *p_state = resp->programmed_state;
628 if (c_state)
629 *c_state = resp->current_state;
630 fail:
631 ti_sci_put_one_xfer(&info->minfo, xfer);
632
633 return ret;
634 }
635
636
637
638
639
640
641
642
643
644
645
646
647
648 static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
649 {
650 return ti_sci_set_device_state(handle, id, 0,
651 MSG_DEVICE_SW_STATE_ON);
652 }
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667 static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
668 u32 id)
669 {
670 return ti_sci_set_device_state(handle, id,
671 MSG_FLAG_DEVICE_EXCLUSIVE,
672 MSG_DEVICE_SW_STATE_ON);
673 }
674
675
676
677
678
679
680
681
682
683
684
685
686 static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
687 {
688 return ti_sci_set_device_state(handle, id, 0,
689 MSG_DEVICE_SW_STATE_RETENTION);
690 }
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705 static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
706 u32 id)
707 {
708 return ti_sci_set_device_state(handle, id,
709 MSG_FLAG_DEVICE_EXCLUSIVE,
710 MSG_DEVICE_SW_STATE_RETENTION);
711 }
712
713
714
715
716
717
718
719
720
721
722
723
724 static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
725 {
726 return ti_sci_set_device_state(handle, id,
727 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
728 }
729
730
731
732
733
734
735
736
737
738 static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
739 {
740 u8 unused;
741
742
743 return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
744 }
745
746
747
748
749
750
751
752
753
754 static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
755 u32 *count)
756 {
757 return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
758 }
759
760
761
762
763
764
765
766
767
768 static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
769 bool *r_state)
770 {
771 int ret;
772 u8 state;
773
774 if (!r_state)
775 return -EINVAL;
776
777 ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
778 if (ret)
779 return ret;
780
781 *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
782
783 return 0;
784 }
785
786
787
788
789
790
791
792
793
794
795 static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
796 bool *r_state, bool *curr_state)
797 {
798 int ret;
799 u8 p_state, c_state;
800
801 if (!r_state && !curr_state)
802 return -EINVAL;
803
804 ret =
805 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
806 if (ret)
807 return ret;
808
809 if (r_state)
810 *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
811 if (curr_state)
812 *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
813
814 return 0;
815 }
816
817
818
819
820
821
822
823
824
825
826 static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
827 bool *r_state, bool *curr_state)
828 {
829 int ret;
830 u8 p_state, c_state;
831
832 if (!r_state && !curr_state)
833 return -EINVAL;
834
835 ret =
836 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
837 if (ret)
838 return ret;
839
840 if (r_state)
841 *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
842 if (curr_state)
843 *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
844
845 return 0;
846 }
847
848
849
850
851
852
853
854
855
856 static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
857 bool *curr_state)
858 {
859 int ret;
860 u8 state;
861
862 if (!curr_state)
863 return -EINVAL;
864
865 ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
866 if (ret)
867 return ret;
868
869 *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
870
871 return 0;
872 }
873
874
875
876
877
878
879
880
881
882
883 static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
884 u32 id, u32 reset_state)
885 {
886 struct ti_sci_info *info;
887 struct ti_sci_msg_req_set_device_resets *req;
888 struct ti_sci_msg_hdr *resp;
889 struct ti_sci_xfer *xfer;
890 struct device *dev;
891 int ret = 0;
892
893 if (IS_ERR(handle))
894 return PTR_ERR(handle);
895 if (!handle)
896 return -EINVAL;
897
898 info = handle_to_ti_sci_info(handle);
899 dev = info->dev;
900
901 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
902 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
903 sizeof(*req), sizeof(*resp));
904 if (IS_ERR(xfer)) {
905 ret = PTR_ERR(xfer);
906 dev_err(dev, "Message alloc failed(%d)\n", ret);
907 return ret;
908 }
909 req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
910 req->id = id;
911 req->resets = reset_state;
912
913 ret = ti_sci_do_xfer(info, xfer);
914 if (ret) {
915 dev_err(dev, "Mbox send fail %d\n", ret);
916 goto fail;
917 }
918
919 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
920
921 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
922
923 fail:
924 ti_sci_put_one_xfer(&info->minfo, xfer);
925
926 return ret;
927 }
928
929
930
931
932
933
934
935
936
937
938 static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
939 u32 id, u32 *reset_state)
940 {
941 return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
942 NULL);
943 }
944
945
946
947
948
949
950
951
952
953
954
955
956
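/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs; this indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */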
957 static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
958 u32 dev_id, u32 clk_id,
959 u32 flags, u8 state)
960 {
961 struct ti_sci_info *info;
962 struct ti_sci_msg_req_set_clock_state *req;
963 struct ti_sci_msg_hdr *resp;
964 struct ti_sci_xfer *xfer;
965 struct device *dev;
966 int ret = 0;
967
968 if (IS_ERR(handle))
969 return PTR_ERR(handle);
970 if (!handle)
971 return -EINVAL;
972
973 info = handle_to_ti_sci_info(handle);
974 dev = info->dev;
975
976 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
977 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
978 sizeof(*req), sizeof(*resp));
979 if (IS_ERR(xfer)) {
980 ret = PTR_ERR(xfer);
981 dev_err(dev, "Message alloc failed(%d)\n", ret);
982 return ret;
983 }
984 req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
985 req->dev_id = dev_id;
986 if (clk_id < 255) {
987 req->clk_id = clk_id;
988 } else {
989 req->clk_id = 255;
990 req->clk_id_32 = clk_id;
991 }
992 req->request_state = state;
993
994 ret = ti_sci_do_xfer(info, xfer);
995 if (ret) {
996 dev_err(dev, "Mbox send fail %d\n", ret);
997 goto fail;
998 }
999
1000 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1001
1002 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1003
1004 fail:
1005 ti_sci_put_one_xfer(&info->minfo, xfer);
1006
1007 return ret;
1008 }
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022 static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
1023 u32 dev_id, u32 clk_id,
1024 u8 *programmed_state, u8 *current_state)
1025 {
1026 struct ti_sci_info *info;
1027 struct ti_sci_msg_req_get_clock_state *req;
1028 struct ti_sci_msg_resp_get_clock_state *resp;
1029 struct ti_sci_xfer *xfer;
1030 struct device *dev;
1031 int ret = 0;
1032
1033 if (IS_ERR(handle))
1034 return PTR_ERR(handle);
1035 if (!handle)
1036 return -EINVAL;
1037
1038 if (!programmed_state && !current_state)
1039 return -EINVAL;
1040
1041 info = handle_to_ti_sci_info(handle);
1042 dev = info->dev;
1043
1044 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
1045 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1046 sizeof(*req), sizeof(*resp));
1047 if (IS_ERR(xfer)) {
1048 ret = PTR_ERR(xfer);
1049 dev_err(dev, "Message alloc failed(%d)\n", ret);
1050 return ret;
1051 }
1052 req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
1053 req->dev_id = dev_id;
1054 if (clk_id < 255) {
1055 req->clk_id = clk_id;
1056 } else {
1057 req->clk_id = 255;
1058 req->clk_id_32 = clk_id;
1059 }
1060
1061 ret = ti_sci_do_xfer(info, xfer);
1062 if (ret) {
1063 dev_err(dev, "Mbox send fail %d\n", ret);
1064 goto fail;
1065 }
1066
1067 resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;
1068
1069 if (!ti_sci_is_response_ack(resp)) {
1070 ret = -ENODEV;
1071 goto fail;
1072 }
1073
1074 if (programmed_state)
1075 *programmed_state = resp->programmed_state;
1076 if (current_state)
1077 *current_state = resp->current_state;
1078
1079 fail:
1080 ti_sci_put_one_xfer(&info->minfo, xfer);
1081
1082 return ret;
1083 }
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098 static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
1099 u32 clk_id, bool needs_ssc,
1100 bool can_change_freq, bool enable_input_term)
1101 {
1102 u32 flags = 0;
1103
1104 flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
1105 flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
1106 flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
1107
1108 return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
1109 MSG_CLOCK_SW_STATE_REQ);
1110 }
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124 static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
1125 u32 dev_id, u32 clk_id)
1126 {
1127 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
1128 MSG_CLOCK_SW_STATE_UNREQ);
1129 }
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143 static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
1144 u32 dev_id, u32 clk_id)
1145 {
1146 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
1147 MSG_CLOCK_SW_STATE_AUTO);
1148 }
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161 static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
1162 u32 dev_id, u32 clk_id, bool *req_state)
1163 {
1164 u8 state = 0;
1165 int ret;
1166
1167 if (!req_state)
1168 return -EINVAL;
1169
1170 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
1171 if (ret)
1172 return ret;
1173
1174 *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
1175 return 0;
1176 }
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190 static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
1191 u32 clk_id, bool *req_state, bool *curr_state)
1192 {
1193 u8 c_state = 0, r_state = 0;
1194 int ret;
1195
1196 if (!req_state && !curr_state)
1197 return -EINVAL;
1198
1199 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1200 &r_state, &c_state);
1201 if (ret)
1202 return ret;
1203
1204 if (req_state)
1205 *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
1206 if (curr_state)
1207 *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
1208 return 0;
1209 }
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223 static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
1224 u32 clk_id, bool *req_state, bool *curr_state)
1225 {
1226 u8 c_state = 0, r_state = 0;
1227 int ret;
1228
1229 if (!req_state && !curr_state)
1230 return -EINVAL;
1231
1232 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1233 &r_state, &c_state);
1234 if (ret)
1235 return ret;
1236
1237 if (req_state)
1238 *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
1239 if (curr_state)
1240 *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
1241 return 0;
1242 }
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255 static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
1256 u32 dev_id, u32 clk_id, u32 parent_id)
1257 {
1258 struct ti_sci_info *info;
1259 struct ti_sci_msg_req_set_clock_parent *req;
1260 struct ti_sci_msg_hdr *resp;
1261 struct ti_sci_xfer *xfer;
1262 struct device *dev;
1263 int ret = 0;
1264
1265 if (IS_ERR(handle))
1266 return PTR_ERR(handle);
1267 if (!handle)
1268 return -EINVAL;
1269
1270 info = handle_to_ti_sci_info(handle);
1271 dev = info->dev;
1272
1273 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
1274 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1275 sizeof(*req), sizeof(*resp));
1276 if (IS_ERR(xfer)) {
1277 ret = PTR_ERR(xfer);
1278 dev_err(dev, "Message alloc failed(%d)\n", ret);
1279 return ret;
1280 }
1281 req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
1282 req->dev_id = dev_id;
1283 if (clk_id < 255) {
1284 req->clk_id = clk_id;
1285 } else {
1286 req->clk_id = 255;
1287 req->clk_id_32 = clk_id;
1288 }
1289 if (parent_id < 255) {
1290 req->parent_id = parent_id;
1291 } else {
1292 req->parent_id = 255;
1293 req->parent_id_32 = parent_id;
1294 }
1295
1296 ret = ti_sci_do_xfer(info, xfer);
1297 if (ret) {
1298 dev_err(dev, "Mbox send fail %d\n", ret);
1299 goto fail;
1300 }
1301
1302 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1303
1304 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1305
1306 fail:
1307 ti_sci_put_one_xfer(&info->minfo, xfer);
1308
1309 return ret;
1310 }
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323 static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
1324 u32 dev_id, u32 clk_id, u32 *parent_id)
1325 {
1326 struct ti_sci_info *info;
1327 struct ti_sci_msg_req_get_clock_parent *req;
1328 struct ti_sci_msg_resp_get_clock_parent *resp;
1329 struct ti_sci_xfer *xfer;
1330 struct device *dev;
1331 int ret = 0;
1332
1333 if (IS_ERR(handle))
1334 return PTR_ERR(handle);
1335 if (!handle || !parent_id)
1336 return -EINVAL;
1337
1338 info = handle_to_ti_sci_info(handle);
1339 dev = info->dev;
1340
1341 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
1342 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1343 sizeof(*req), sizeof(*resp));
1344 if (IS_ERR(xfer)) {
1345 ret = PTR_ERR(xfer);
1346 dev_err(dev, "Message alloc failed(%d)\n", ret);
1347 return ret;
1348 }
1349 req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
1350 req->dev_id = dev_id;
1351 if (clk_id < 255) {
1352 req->clk_id = clk_id;
1353 } else {
1354 req->clk_id = 255;
1355 req->clk_id_32 = clk_id;
1356 }
1357
1358 ret = ti_sci_do_xfer(info, xfer);
1359 if (ret) {
1360 dev_err(dev, "Mbox send fail %d\n", ret);
1361 goto fail;
1362 }
1363
1364 resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
1365
1366 if (!ti_sci_is_response_ack(resp)) {
1367 ret = -ENODEV;
1368 } else {
1369 if (resp->parent_id < 255)
1370 *parent_id = resp->parent_id;
1371 else
1372 *parent_id = resp->parent_id_32;
1373 }
1374
1375 fail:
1376 ti_sci_put_one_xfer(&info->minfo, xfer);
1377
1378 return ret;
1379 }
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392 static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
1393 u32 dev_id, u32 clk_id,
1394 u32 *num_parents)
1395 {
1396 struct ti_sci_info *info;
1397 struct ti_sci_msg_req_get_clock_num_parents *req;
1398 struct ti_sci_msg_resp_get_clock_num_parents *resp;
1399 struct ti_sci_xfer *xfer;
1400 struct device *dev;
1401 int ret = 0;
1402
1403 if (IS_ERR(handle))
1404 return PTR_ERR(handle);
1405 if (!handle || !num_parents)
1406 return -EINVAL;
1407
1408 info = handle_to_ti_sci_info(handle);
1409 dev = info->dev;
1410
1411 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
1412 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1413 sizeof(*req), sizeof(*resp));
1414 if (IS_ERR(xfer)) {
1415 ret = PTR_ERR(xfer);
1416 dev_err(dev, "Message alloc failed(%d)\n", ret);
1417 return ret;
1418 }
1419 req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
1420 req->dev_id = dev_id;
1421 if (clk_id < 255) {
1422 req->clk_id = clk_id;
1423 } else {
1424 req->clk_id = 255;
1425 req->clk_id_32 = clk_id;
1426 }
1427
1428 ret = ti_sci_do_xfer(info, xfer);
1429 if (ret) {
1430 dev_err(dev, "Mbox send fail %d\n", ret);
1431 goto fail;
1432 }
1433
1434 resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
1435
1436 if (!ti_sci_is_response_ack(resp)) {
1437 ret = -ENODEV;
1438 } else {
1439 if (resp->num_parents < 255)
1440 *num_parents = resp->num_parents;
1441 else
1442 *num_parents = resp->num_parents_32;
1443 }
1444
1445 fail:
1446 ti_sci_put_one_xfer(&info->minfo, xfer);
1447
1448 return ret;
1449 }
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470 static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
1471 u32 dev_id, u32 clk_id, u64 min_freq,
1472 u64 target_freq, u64 max_freq,
1473 u64 *match_freq)
1474 {
1475 struct ti_sci_info *info;
1476 struct ti_sci_msg_req_query_clock_freq *req;
1477 struct ti_sci_msg_resp_query_clock_freq *resp;
1478 struct ti_sci_xfer *xfer;
1479 struct device *dev;
1480 int ret = 0;
1481
1482 if (IS_ERR(handle))
1483 return PTR_ERR(handle);
1484 if (!handle || !match_freq)
1485 return -EINVAL;
1486
1487 info = handle_to_ti_sci_info(handle);
1488 dev = info->dev;
1489
1490 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
1491 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1492 sizeof(*req), sizeof(*resp));
1493 if (IS_ERR(xfer)) {
1494 ret = PTR_ERR(xfer);
1495 dev_err(dev, "Message alloc failed(%d)\n", ret);
1496 return ret;
1497 }
1498 req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
1499 req->dev_id = dev_id;
1500 if (clk_id < 255) {
1501 req->clk_id = clk_id;
1502 } else {
1503 req->clk_id = 255;
1504 req->clk_id_32 = clk_id;
1505 }
1506 req->min_freq_hz = min_freq;
1507 req->target_freq_hz = target_freq;
1508 req->max_freq_hz = max_freq;
1509
1510 ret = ti_sci_do_xfer(info, xfer);
1511 if (ret) {
1512 dev_err(dev, "Mbox send fail %d\n", ret);
1513 goto fail;
1514 }
1515
1516 resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
1517
1518 if (!ti_sci_is_response_ack(resp))
1519 ret = -ENODEV;
1520 else
1521 *match_freq = resp->freq_hz;
1522
1523 fail:
1524 ti_sci_put_one_xfer(&info->minfo, xfer);
1525
1526 return ret;
1527 }
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547 static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
1548 u32 dev_id, u32 clk_id, u64 min_freq,
1549 u64 target_freq, u64 max_freq)
1550 {
1551 struct ti_sci_info *info;
1552 struct ti_sci_msg_req_set_clock_freq *req;
1553 struct ti_sci_msg_hdr *resp;
1554 struct ti_sci_xfer *xfer;
1555 struct device *dev;
1556 int ret = 0;
1557
1558 if (IS_ERR(handle))
1559 return PTR_ERR(handle);
1560 if (!handle)
1561 return -EINVAL;
1562
1563 info = handle_to_ti_sci_info(handle);
1564 dev = info->dev;
1565
1566 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
1567 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1568 sizeof(*req), sizeof(*resp));
1569 if (IS_ERR(xfer)) {
1570 ret = PTR_ERR(xfer);
1571 dev_err(dev, "Message alloc failed(%d)\n", ret);
1572 return ret;
1573 }
1574 req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
1575 req->dev_id = dev_id;
1576 if (clk_id < 255) {
1577 req->clk_id = clk_id;
1578 } else {
1579 req->clk_id = 255;
1580 req->clk_id_32 = clk_id;
1581 }
1582 req->min_freq_hz = min_freq;
1583 req->target_freq_hz = target_freq;
1584 req->max_freq_hz = max_freq;
1585
1586 ret = ti_sci_do_xfer(info, xfer);
1587 if (ret) {
1588 dev_err(dev, "Mbox send fail %d\n", ret);
1589 goto fail;
1590 }
1591
1592 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1593
1594 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1595
1596 fail:
1597 ti_sci_put_one_xfer(&info->minfo, xfer);
1598
1599 return ret;
1600 }
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613 static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1614 u32 dev_id, u32 clk_id, u64 *freq)
1615 {
1616 struct ti_sci_info *info;
1617 struct ti_sci_msg_req_get_clock_freq *req;
1618 struct ti_sci_msg_resp_get_clock_freq *resp;
1619 struct ti_sci_xfer *xfer;
1620 struct device *dev;
1621 int ret = 0;
1622
1623 if (IS_ERR(handle))
1624 return PTR_ERR(handle);
1625 if (!handle || !freq)
1626 return -EINVAL;
1627
1628 info = handle_to_ti_sci_info(handle);
1629 dev = info->dev;
1630
1631 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
1632 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1633 sizeof(*req), sizeof(*resp));
1634 if (IS_ERR(xfer)) {
1635 ret = PTR_ERR(xfer);
1636 dev_err(dev, "Message alloc failed(%d)\n", ret);
1637 return ret;
1638 }
1639 req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
1640 req->dev_id = dev_id;
1641 if (clk_id < 255) {
1642 req->clk_id = clk_id;
1643 } else {
1644 req->clk_id = 255;
1645 req->clk_id_32 = clk_id;
1646 }
1647
1648 ret = ti_sci_do_xfer(info, xfer);
1649 if (ret) {
1650 dev_err(dev, "Mbox send fail %d\n", ret);
1651 goto fail;
1652 }
1653
1654 resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
1655
1656 if (!ti_sci_is_response_ack(resp))
1657 ret = -ENODEV;
1658 else
1659 *freq = resp->freq_hz;
1660
1661 fail:
1662 ti_sci_put_one_xfer(&info->minfo, xfer);
1663
1664 return ret;
1665 }
1666
1667 static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
1668 {
1669 struct ti_sci_info *info;
1670 struct ti_sci_msg_req_reboot *req;
1671 struct ti_sci_msg_hdr *resp;
1672 struct ti_sci_xfer *xfer;
1673 struct device *dev;
1674 int ret = 0;
1675
1676 if (IS_ERR(handle))
1677 return PTR_ERR(handle);
1678 if (!handle)
1679 return -EINVAL;
1680
1681 info = handle_to_ti_sci_info(handle);
1682 dev = info->dev;
1683
1684 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
1685 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1686 sizeof(*req), sizeof(*resp));
1687 if (IS_ERR(xfer)) {
1688 ret = PTR_ERR(xfer);
1689 dev_err(dev, "Message alloc failed(%d)\n", ret);
1690 return ret;
1691 }
1692 req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
1693
1694 ret = ti_sci_do_xfer(info, xfer);
1695 if (ret) {
1696 dev_err(dev, "Mbox send fail %d\n", ret);
1697 goto fail;
1698 }
1699
1700 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1701
1702 if (!ti_sci_is_response_ack(resp))
1703 ret = -ENODEV;
1704 else
1705 ret = 0;
1706
1707 fail:
1708 ti_sci_put_one_xfer(&info->minfo, xfer);
1709
1710 return ret;
1711 }
1712
1713 static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
1714 u16 *type)
1715 {
1716 struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
1717 bool found = false;
1718 int i;
1719
1720
1721 if (!rm_type_map) {
1722 *type = dev_id;
1723 return 0;
1724 }
1725
1726 for (i = 0; rm_type_map[i].dev_id; i++) {
1727 if (rm_type_map[i].dev_id == dev_id) {
1728 *type = rm_type_map[i].type;
1729 found = true;
1730 break;
1731 }
1732 }
1733
1734 if (!found)
1735 return -EINVAL;
1736
1737 return 0;
1738 }
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
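/**
 * ti_sci_get_resource_range - Helper to get a range of resources assigned
 *			       to a host. The resource is uniquely identified
 *			       by type and subtype.
 * @handle:		Pointer to TISCI handle
 * @dev_id:		TISCI device ID
 * @subtype:		Resource assignment subtype being requested from
 *			the given device
 * @s_host:		Host processor ID to which the resources are allocated
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
 */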
1754 static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
1755 u32 dev_id, u8 subtype, u8 s_host,
1756 u16 *range_start, u16 *range_num)
1757 {
1758 struct ti_sci_msg_resp_get_resource_range *resp;
1759 struct ti_sci_msg_req_get_resource_range *req;
1760 struct ti_sci_xfer *xfer;
1761 struct ti_sci_info *info;
1762 struct device *dev;
1763 u16 type;
1764 int ret = 0;
1765
1766 if (IS_ERR(handle))
1767 return PTR_ERR(handle);
1768 if (!handle)
1769 return -EINVAL;
1770
1771 info = handle_to_ti_sci_info(handle);
1772 dev = info->dev;
1773
1774 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
1775 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1776 sizeof(*req), sizeof(*resp));
1777 if (IS_ERR(xfer)) {
1778 ret = PTR_ERR(xfer);
1779 dev_err(dev, "Message alloc failed(%d)\n", ret);
1780 return ret;
1781 }
1782
1783 ret = ti_sci_get_resource_type(info, dev_id, &type);
1784 if (ret) {
1785 dev_err(dev, "rm type lookup failed for %u\n", dev_id);
1786 goto fail;
1787 }
1788
1789 req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
1790 req->secondary_host = s_host;
1791 req->type = type & MSG_RM_RESOURCE_TYPE_MASK;
1792 req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
1793
1794 ret = ti_sci_do_xfer(info, xfer);
1795 if (ret) {
1796 dev_err(dev, "Mbox send fail %d\n", ret);
1797 goto fail;
1798 }
1799
1800 resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
1801
1802 if (!ti_sci_is_response_ack(resp)) {
1803 ret = -ENODEV;
1804 } else if (!resp->range_start && !resp->range_num) {
1805 ret = -ENODEV;
1806 } else {
1807 *range_start = resp->range_start;
1808 *range_num = resp->range_num;
1809 }
1810
1811 fail:
1812 ti_sci_put_one_xfer(&info->minfo, xfer);
1813
1814 return ret;
1815 }
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829 static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1830 u32 dev_id, u8 subtype,
1831 u16 *range_start, u16 *range_num)
1832 {
1833 return ti_sci_get_resource_range(handle, dev_id, subtype,
1834 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1835 range_start, range_num);
1836 }
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851 static
1852 int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1853 u32 dev_id, u8 subtype, u8 s_host,
1854 u16 *range_start, u16 *range_num)
1855 {
1856 return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1857 range_start, range_num);
1858 }
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
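/**
 * ti_sci_manage_irq() - Helper to configure/release the irq route between
 *			 the requested source and destination
 * @handle:		Pointer to TISCI handle
 * @valid_params:	Bit fields defining the validity of certain params
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @dst_id:		Device ID of the IRQ destination
 * @dst_host_irq:	IRQ number of the destination device
 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
 * @vint:		Virtual interrupt to be used within the IA
 * @global_event:	Global event number to be used for the requesting event
 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
 * @s_host:		Secondary host ID for which the irq/event is requested
 * @type:		Request type, irq set or release
 *
 * Return: 0 if all went fine, else return appropriate error.
 */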
1879 static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
1880 u32 valid_params, u16 src_id, u16 src_index,
1881 u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
1882 u16 global_event, u8 vint_status_bit, u8 s_host,
1883 u16 type)
1884 {
1885 struct ti_sci_msg_req_manage_irq *req;
1886 struct ti_sci_msg_hdr *resp;
1887 struct ti_sci_xfer *xfer;
1888 struct ti_sci_info *info;
1889 struct device *dev;
1890 int ret = 0;
1891
1892 if (IS_ERR(handle))
1893 return PTR_ERR(handle);
1894 if (!handle)
1895 return -EINVAL;
1896
1897 info = handle_to_ti_sci_info(handle);
1898 dev = info->dev;
1899
1900 xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1901 sizeof(*req), sizeof(*resp));
1902 if (IS_ERR(xfer)) {
1903 ret = PTR_ERR(xfer);
1904 dev_err(dev, "Message alloc failed(%d)\n", ret);
1905 return ret;
1906 }
1907 req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
1908 req->valid_params = valid_params;
1909 req->src_id = src_id;
1910 req->src_index = src_index;
1911 req->dst_id = dst_id;
1912 req->dst_host_irq = dst_host_irq;
1913 req->ia_id = ia_id;
1914 req->vint = vint;
1915 req->global_event = global_event;
1916 req->vint_status_bit = vint_status_bit;
1917 req->secondary_host = s_host;
1918
1919 ret = ti_sci_do_xfer(info, xfer);
1920 if (ret) {
1921 dev_err(dev, "Mbox send fail %d\n", ret);
1922 goto fail;
1923 }
1924
1925 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1926
1927 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1928
1929 fail:
1930 ti_sci_put_one_xfer(&info->minfo, xfer);
1931
1932 return ret;
1933 }
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953 static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
1954 u16 src_id, u16 src_index, u16 dst_id,
1955 u16 dst_host_irq, u16 ia_id, u16 vint,
1956 u16 global_event, u8 vint_status_bit, u8 s_host)
1957 {
1958 pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
1959 __func__, valid_params, src_id, src_index,
1960 dst_id, dst_host_irq, ia_id, vint, global_event,
1961 vint_status_bit);
1962
1963 return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
1964 dst_id, dst_host_irq, ia_id, vint,
1965 global_event, vint_status_bit, s_host,
1966 TI_SCI_MSG_SET_IRQ);
1967 }
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987 static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
1988 u16 src_id, u16 src_index, u16 dst_id,
1989 u16 dst_host_irq, u16 ia_id, u16 vint,
1990 u16 global_event, u8 vint_status_bit, u8 s_host)
1991 {
1992 pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
1993 __func__, valid_params, src_id, src_index,
1994 dst_id, dst_host_irq, ia_id, vint, global_event,
1995 vint_status_bit);
1996
1997 return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
1998 dst_id, dst_host_irq, ia_id, vint,
1999 global_event, vint_status_bit, s_host,
2000 TI_SCI_MSG_FREE_IRQ);
2001 }
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016 static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
2017 u16 src_index, u16 dst_id, u16 dst_host_irq)
2018 {
2019 u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
2020
2021 return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
2022 dst_host_irq, 0, 0, 0, 0, 0);
2023 }
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038 static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
2039 u16 src_id, u16 src_index, u16 ia_id,
2040 u16 vint, u16 global_event,
2041 u8 vint_status_bit)
2042 {
2043 u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
2044 MSG_FLAG_GLB_EVNT_VALID |
2045 MSG_FLAG_VINT_STS_BIT_VALID;
2046
2047 return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
2048 ia_id, vint, global_event, vint_status_bit, 0);
2049 }
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064 static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
2065 u16 src_index, u16 dst_id, u16 dst_host_irq)
2066 {
2067 u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
2068
2069 return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
2070 dst_host_irq, 0, 0, 0, 0, 0);
2071 }
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086 static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
2087 u16 src_id, u16 src_index, u16 ia_id,
2088 u16 vint, u16 global_event,
2089 u8 vint_status_bit)
2090 {
2091 u32 valid_params = MSG_FLAG_IA_ID_VALID |
2092 MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
2093 MSG_FLAG_VINT_STS_BIT_VALID;
2094
2095 return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
2096 ia_id, vint, global_event, vint_status_bit, 0);
2097 }
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118 static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
2119 u32 valid_params, u16 nav_id, u16 index,
2120 u32 addr_lo, u32 addr_hi, u32 count,
2121 u8 mode, u8 size, u8 order_id)
2122 {
2123 struct ti_sci_msg_rm_ring_cfg_req *req;
2124 struct ti_sci_msg_hdr *resp;
2125 struct ti_sci_xfer *xfer;
2126 struct ti_sci_info *info;
2127 struct device *dev;
2128 int ret = 0;
2129
2130 if (IS_ERR_OR_NULL(handle))
2131 return -EINVAL;
2132
2133 info = handle_to_ti_sci_info(handle);
2134 dev = info->dev;
2135
2136 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2137 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2138 sizeof(*req), sizeof(*resp));
2139 if (IS_ERR(xfer)) {
2140 ret = PTR_ERR(xfer);
2141 dev_err(dev, "RM_RA:Message config failed(%d)\n", ret);
2142 return ret;
2143 }
2144 req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
2145 req->valid_params = valid_params;
2146 req->nav_id = nav_id;
2147 req->index = index;
2148 req->addr_lo = addr_lo;
2149 req->addr_hi = addr_hi;
2150 req->count = count;
2151 req->mode = mode;
2152 req->size = size;
2153 req->order_id = order_id;
2154
2155 ret = ti_sci_do_xfer(info, xfer);
2156 if (ret) {
2157 dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
2158 goto fail;
2159 }
2160
2161 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2162 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2163
2164 fail:
2165 ti_sci_put_one_xfer(&info->minfo, xfer);
2166 dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2167 return ret;
2168 }
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187 static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
2188 u32 nav_id, u32 index, u8 *mode,
2189 u32 *addr_lo, u32 *addr_hi,
2190 u32 *count, u8 *size, u8 *order_id)
2191 {
2192 struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
2193 struct ti_sci_msg_rm_ring_get_cfg_req *req;
2194 struct ti_sci_xfer *xfer;
2195 struct ti_sci_info *info;
2196 struct device *dev;
2197 int ret = 0;
2198
2199 if (IS_ERR_OR_NULL(handle))
2200 return -EINVAL;
2201
2202 info = handle_to_ti_sci_info(handle);
2203 dev = info->dev;
2204
2205 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
2206 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2207 sizeof(*req), sizeof(*resp));
2208 if (IS_ERR(xfer)) {
2209 ret = PTR_ERR(xfer);
2210 dev_err(dev,
2211 "RM_RA:Message get config failed(%d)\n", ret);
2212 return ret;
2213 }
2214 req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
2215 req->nav_id = nav_id;
2216 req->index = index;
2217
2218 ret = ti_sci_do_xfer(info, xfer);
2219 if (ret) {
2220 dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret);
2221 goto fail;
2222 }
2223
2224 resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;
2225
2226 if (!ti_sci_is_response_ack(resp)) {
2227 ret = -ENODEV;
2228 } else {
2229 if (mode)
2230 *mode = resp->mode;
2231 if (addr_lo)
2232 *addr_lo = resp->addr_lo;
2233 if (addr_hi)
2234 *addr_hi = resp->addr_hi;
2235 if (count)
2236 *count = resp->count;
2237 if (size)
2238 *size = resp->size;
2239 if (order_id)
2240 *order_id = resp->order_id;
2241 }
2242
2243 fail:
2244 ti_sci_put_one_xfer(&info->minfo, xfer);
2245 dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
2246 return ret;
2247 }
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259 static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2260 u32 nav_id, u32 src_thread, u32 dst_thread)
2261 {
2262 struct ti_sci_msg_psil_pair *req;
2263 struct ti_sci_msg_hdr *resp;
2264 struct ti_sci_xfer *xfer;
2265 struct ti_sci_info *info;
2266 struct device *dev;
2267 int ret = 0;
2268
2269 if (IS_ERR(handle))
2270 return PTR_ERR(handle);
2271 if (!handle)
2272 return -EINVAL;
2273
2274 info = handle_to_ti_sci_info(handle);
2275 dev = info->dev;
2276
2277 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2278 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2279 sizeof(*req), sizeof(*resp));
2280 if (IS_ERR(xfer)) {
2281 ret = PTR_ERR(xfer);
2282 dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
2283 return ret;
2284 }
2285 req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
2286 req->nav_id = nav_id;
2287 req->src_thread = src_thread;
2288 req->dst_thread = dst_thread;
2289
2290 ret = ti_sci_do_xfer(info, xfer);
2291 if (ret) {
2292 dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2293 goto fail;
2294 }
2295
2296 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2297 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2298
2299 fail:
2300 ti_sci_put_one_xfer(&info->minfo, xfer);
2301
2302 return ret;
2303 }
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315 static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2316 u32 nav_id, u32 src_thread, u32 dst_thread)
2317 {
2318 struct ti_sci_msg_psil_unpair *req;
2319 struct ti_sci_msg_hdr *resp;
2320 struct ti_sci_xfer *xfer;
2321 struct ti_sci_info *info;
2322 struct device *dev;
2323 int ret = 0;
2324
2325 if (IS_ERR(handle))
2326 return PTR_ERR(handle);
2327 if (!handle)
2328 return -EINVAL;
2329
2330 info = handle_to_ti_sci_info(handle);
2331 dev = info->dev;
2332
2333 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2334 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2335 sizeof(*req), sizeof(*resp));
2336 if (IS_ERR(xfer)) {
2337 ret = PTR_ERR(xfer);
2338 dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
2339 return ret;
2340 }
2341 req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
2342 req->nav_id = nav_id;
2343 req->src_thread = src_thread;
2344 req->dst_thread = dst_thread;
2345
2346 ret = ti_sci_do_xfer(info, xfer);
2347 if (ret) {
2348 dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2349 goto fail;
2350 }
2351
2352 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2353 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2354
2355 fail:
2356 ti_sci_put_one_xfer(&info->minfo, xfer);
2357
2358 return ret;
2359 }
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372 static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
2373 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2374 {
2375 struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
2376 struct ti_sci_msg_hdr *resp;
2377 struct ti_sci_xfer *xfer;
2378 struct ti_sci_info *info;
2379 struct device *dev;
2380 int ret = 0;
2381
2382 if (IS_ERR_OR_NULL(handle))
2383 return -EINVAL;
2384
2385 info = handle_to_ti_sci_info(handle);
2386 dev = info->dev;
2387
2388 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2389 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2390 sizeof(*req), sizeof(*resp));
2391 if (IS_ERR(xfer)) {
2392 ret = PTR_ERR(xfer);
2393 dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2394 return ret;
2395 }
2396 req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
2397 req->valid_params = params->valid_params;
2398 req->nav_id = params->nav_id;
2399 req->index = params->index;
2400 req->tx_pause_on_err = params->tx_pause_on_err;
2401 req->tx_filt_einfo = params->tx_filt_einfo;
2402 req->tx_filt_pswords = params->tx_filt_pswords;
2403 req->tx_atype = params->tx_atype;
2404 req->tx_chan_type = params->tx_chan_type;
2405 req->tx_supr_tdpkt = params->tx_supr_tdpkt;
2406 req->tx_fetch_size = params->tx_fetch_size;
2407 req->tx_credit_count = params->tx_credit_count;
2408 req->txcq_qnum = params->txcq_qnum;
2409 req->tx_priority = params->tx_priority;
2410 req->tx_qos = params->tx_qos;
2411 req->tx_orderid = params->tx_orderid;
2412 req->fdepth = params->fdepth;
2413 req->tx_sched_priority = params->tx_sched_priority;
2414 req->tx_burst_size = params->tx_burst_size;
2415
2416 ret = ti_sci_do_xfer(info, xfer);
2417 if (ret) {
2418 dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2419 goto fail;
2420 }
2421
2422 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2423 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2424
2425 fail:
2426 ti_sci_put_one_xfer(&info->minfo, xfer);
2427 dev_dbg(dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
2428 return ret;
2429 }
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442 static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
2443 const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2444 {
2445 struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
2446 struct ti_sci_msg_hdr *resp;
2447 struct ti_sci_xfer *xfer;
2448 struct ti_sci_info *info;
2449 struct device *dev;
2450 int ret = 0;
2451
2452 if (IS_ERR_OR_NULL(handle))
2453 return -EINVAL;
2454
2455 info = handle_to_ti_sci_info(handle);
2456 dev = info->dev;
2457
2458 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2459 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2460 sizeof(*req), sizeof(*resp));
2461 if (IS_ERR(xfer)) {
2462 ret = PTR_ERR(xfer);
2463 dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
2464 return ret;
2465 }
2466 req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
2467 req->valid_params = params->valid_params;
2468 req->nav_id = params->nav_id;
2469 req->index = params->index;
2470 req->rx_fetch_size = params->rx_fetch_size;
2471 req->rxcq_qnum = params->rxcq_qnum;
2472 req->rx_priority = params->rx_priority;
2473 req->rx_qos = params->rx_qos;
2474 req->rx_orderid = params->rx_orderid;
2475 req->rx_sched_priority = params->rx_sched_priority;
2476 req->flowid_start = params->flowid_start;
2477 req->flowid_cnt = params->flowid_cnt;
2478 req->rx_pause_on_err = params->rx_pause_on_err;
2479 req->rx_atype = params->rx_atype;
2480 req->rx_chan_type = params->rx_chan_type;
2481 req->rx_ignore_short = params->rx_ignore_short;
2482 req->rx_ignore_long = params->rx_ignore_long;
2483 req->rx_burst_size = params->rx_burst_size;
2484
2485 ret = ti_sci_do_xfer(info, xfer);
2486 if (ret) {
2487 dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
2488 goto fail;
2489 }
2490
2491 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2492 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2493
2494 fail:
2495 ti_sci_put_one_xfer(&info->minfo, xfer);
2496 dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2497 return ret;
2498 }
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511 static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
2512 const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2513 {
2514 struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
2515 struct ti_sci_msg_hdr *resp;
2516 struct ti_sci_xfer *xfer;
2517 struct ti_sci_info *info;
2518 struct device *dev;
2519 int ret = 0;
2520
2521 if (IS_ERR_OR_NULL(handle))
2522 return -EINVAL;
2523
2524 info = handle_to_ti_sci_info(handle);
2525 dev = info->dev;
2526
2527 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2528 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2529 sizeof(*req), sizeof(*resp));
2530 if (IS_ERR(xfer)) {
2531 ret = PTR_ERR(xfer);
2532 dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
2533 return ret;
2534 }
2535 req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
2536 req->valid_params = params->valid_params;
2537 req->nav_id = params->nav_id;
2538 req->flow_index = params->flow_index;
2539 req->rx_einfo_present = params->rx_einfo_present;
2540 req->rx_psinfo_present = params->rx_psinfo_present;
2541 req->rx_error_handling = params->rx_error_handling;
2542 req->rx_desc_type = params->rx_desc_type;
2543 req->rx_sop_offset = params->rx_sop_offset;
2544 req->rx_dest_qnum = params->rx_dest_qnum;
2545 req->rx_src_tag_hi = params->rx_src_tag_hi;
2546 req->rx_src_tag_lo = params->rx_src_tag_lo;
2547 req->rx_dest_tag_hi = params->rx_dest_tag_hi;
2548 req->rx_dest_tag_lo = params->rx_dest_tag_lo;
2549 req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2550 req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2551 req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2552 req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2553 req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2554 req->rx_fdq1_qnum = params->rx_fdq1_qnum;
2555 req->rx_fdq2_qnum = params->rx_fdq2_qnum;
2556 req->rx_fdq3_qnum = params->rx_fdq3_qnum;
2557 req->rx_ps_location = params->rx_ps_location;
2558
2559 ret = ti_sci_do_xfer(info, xfer);
2560 if (ret) {
2561 dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2562 goto fail;
2563 }
2564
2565 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2566 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2567
2568 fail:
2569 ti_sci_put_one_xfer(&info->minfo, xfer);
2570 dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2571 return ret;
2572 }
2573
2574
2575
2576
2577
2578
2579
2580
2581 static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
2582 u8 proc_id)
2583 {
2584 struct ti_sci_msg_req_proc_request *req;
2585 struct ti_sci_msg_hdr *resp;
2586 struct ti_sci_info *info;
2587 struct ti_sci_xfer *xfer;
2588 struct device *dev;
2589 int ret = 0;
2590
2591 if (!handle)
2592 return -EINVAL;
2593 if (IS_ERR(handle))
2594 return PTR_ERR(handle);
2595
2596 info = handle_to_ti_sci_info(handle);
2597 dev = info->dev;
2598
2599 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
2600 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2601 sizeof(*req), sizeof(*resp));
2602 if (IS_ERR(xfer)) {
2603 ret = PTR_ERR(xfer);
2604 dev_err(dev, "Message alloc failed(%d)\n", ret);
2605 return ret;
2606 }
2607 req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
2608 req->processor_id = proc_id;
2609
2610 ret = ti_sci_do_xfer(info, xfer);
2611 if (ret) {
2612 dev_err(dev, "Mbox send fail %d\n", ret);
2613 goto fail;
2614 }
2615
2616 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2617
2618 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2619
2620 fail:
2621 ti_sci_put_one_xfer(&info->minfo, xfer);
2622
2623 return ret;
2624 }
2625
2626
2627
2628
2629
2630
2631
2632
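/**
 * ti_sci_cmd_proc_release() - Release control of a physical processor
 * @handle: Pointer to TI SCI handle
 * @proc_id: Processor ID this request is for
 *
 * Return: 0 if the firmware acknowledged the request, else an appropriate
 * error value.
 */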
2633 static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
2634 u8 proc_id)
2635 {
2636 struct ti_sci_msg_req_proc_release *req;
2637 struct ti_sci_msg_hdr *resp;
2638 struct ti_sci_info *info;
2639 struct ti_sci_xfer *xfer;
2640 struct device *dev;
2641 int ret = 0;
2642
2643 if (!handle)
2644 return -EINVAL;
2645 if (IS_ERR(handle))
2646 return PTR_ERR(handle);
2647
2648 info = handle_to_ti_sci_info(handle);
2649 dev = info->dev;
2650
2651 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
2652 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2653 sizeof(*req), sizeof(*resp));
2654 if (IS_ERR(xfer)) {
2655 ret = PTR_ERR(xfer);
2656 dev_err(dev, "Message alloc failed(%d)\n", ret);
2657 return ret;
2658 }
2659 req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
2660 req->processor_id = proc_id;
2661
2662 ret = ti_sci_do_xfer(info, xfer);
2663 if (ret) {
2664 dev_err(dev, "Mbox send fail %d\n", ret);
2665 goto fail;
2666 }
2667
2668 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2669
2670 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2671
2672 fail:
2673 ti_sci_put_one_xfer(&info->minfo, xfer);
2674
2675 return ret;
2676 }
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
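/**
 * ti_sci_cmd_proc_handover() - Hand control of a physical processor over to
 * another host
 * @handle: Pointer to TI SCI handle
 * @proc_id: Processor ID this request is for
 * @host_id: Host ID that receives control of the processor
 *
 * Return: 0 if the firmware acknowledged the request, else an appropriate
 * error value.
 */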
2688 static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
2689 u8 proc_id, u8 host_id)
2690 {
2691 struct ti_sci_msg_req_proc_handover *req;
2692 struct ti_sci_msg_hdr *resp;
2693 struct ti_sci_info *info;
2694 struct ti_sci_xfer *xfer;
2695 struct device *dev;
2696 int ret = 0;
2697
2698 if (!handle)
2699 return -EINVAL;
2700 if (IS_ERR(handle))
2701 return PTR_ERR(handle);
2702
2703 info = handle_to_ti_sci_info(handle);
2704 dev = info->dev;
2705
2706 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
2707 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2708 sizeof(*req), sizeof(*resp));
2709 if (IS_ERR(xfer)) {
2710 ret = PTR_ERR(xfer);
2711 dev_err(dev, "Message alloc failed(%d)\n", ret);
2712 return ret;
2713 }
2714 req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
2715 req->processor_id = proc_id;
2716 req->host_id = host_id;
2717
2718 ret = ti_sci_do_xfer(info, xfer);
2719 if (ret) {
2720 dev_err(dev, "Mbox send fail %d\n", ret);
2721 goto fail;
2722 }
2723
2724 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2725
2726 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2727
2728 fail:
2729 ti_sci_put_one_xfer(&info->minfo, xfer);
2730
2731 return ret;
2732 }
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
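/**
 * ti_sci_cmd_proc_set_config() - Set processor boot configuration
 * @handle: Pointer to TI SCI handle
 * @proc_id: Processor ID this request is for
 * @bootvector: Boot vector (start address), split across the low/high fields
 *              of the message
 * @config_flags_set: Configuration flags to set
 * @config_flags_clear: Configuration flags to clear
 *
 * Return: 0 if the firmware acknowledged the request, else an appropriate
 * error value.
 */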
2744 static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
2745 u8 proc_id, u64 bootvector,
2746 u32 config_flags_set,
2747 u32 config_flags_clear)
2748 {
2749 struct ti_sci_msg_req_set_config *req;
2750 struct ti_sci_msg_hdr *resp;
2751 struct ti_sci_info *info;
2752 struct ti_sci_xfer *xfer;
2753 struct device *dev;
2754 int ret = 0;
2755
2756 if (!handle)
2757 return -EINVAL;
2758 if (IS_ERR(handle))
2759 return PTR_ERR(handle);
2760
2761 info = handle_to_ti_sci_info(handle);
2762 dev = info->dev;
2763
2764 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
2765 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2766 sizeof(*req), sizeof(*resp));
2767 if (IS_ERR(xfer)) {
2768 ret = PTR_ERR(xfer);
2769 dev_err(dev, "Message alloc failed(%d)\n", ret);
2770 return ret;
2771 }
2772 req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
2773 req->processor_id = proc_id;
2774 req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
2775 req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
2776 TI_SCI_ADDR_HIGH_SHIFT;
2777 req->config_flags_set = config_flags_set;
2778 req->config_flags_clear = config_flags_clear;
2779
2780 ret = ti_sci_do_xfer(info, xfer);
2781 if (ret) {
2782 dev_err(dev, "Mbox send fail %d\n", ret);
2783 goto fail;
2784 }
2785
2786 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2787
2788 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2789
2790 fail:
2791 ti_sci_put_one_xfer(&info->minfo, xfer);
2792
2793 return ret;
2794 }
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
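/**
 * ti_sci_cmd_proc_set_control() - Set processor boot control flags
 * @handle: Pointer to TI SCI handle
 * @proc_id: Processor ID this request is for
 * @control_flags_set: Control flags to set
 * @control_flags_clear: Control flags to clear
 *
 * Return: 0 if the firmware acknowledged the request, else an appropriate
 * error value.
 */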
2806 static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
2807 u8 proc_id, u32 control_flags_set,
2808 u32 control_flags_clear)
2809 {
2810 struct ti_sci_msg_req_set_ctrl *req;
2811 struct ti_sci_msg_hdr *resp;
2812 struct ti_sci_info *info;
2813 struct ti_sci_xfer *xfer;
2814 struct device *dev;
2815 int ret = 0;
2816
2817 if (!handle)
2818 return -EINVAL;
2819 if (IS_ERR(handle))
2820 return PTR_ERR(handle);
2821
2822 info = handle_to_ti_sci_info(handle);
2823 dev = info->dev;
2824
2825 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
2826 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2827 sizeof(*req), sizeof(*resp));
2828 if (IS_ERR(xfer)) {
2829 ret = PTR_ERR(xfer);
2830 dev_err(dev, "Message alloc failed(%d)\n", ret);
2831 return ret;
2832 }
2833 req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
2834 req->processor_id = proc_id;
2835 req->control_flags_set = control_flags_set;
2836 req->control_flags_clear = control_flags_clear;
2837
2838 ret = ti_sci_do_xfer(info, xfer);
2839 if (ret) {
2840 dev_err(dev, "Mbox send fail %d\n", ret);
2841 goto fail;
2842 }
2843
2844 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2845
2846 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2847
2848 fail:
2849 ti_sci_put_one_xfer(&info->minfo, xfer);
2850
2851 return ret;
2852 }
2853
2854
2855
2856
2857
2858
2859
2860
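/**
 * ti_sci_cmd_proc_get_status() - Read back processor boot status
 * @handle: Pointer to TI SCI handle
 * @proc_id: Processor ID this request is for
 * @bv: Returned boot vector, reassembled from the low/high response fields
 * @cfg_flags: Returned configuration flags
 * @ctrl_flags: Returned control flags
 * @sts_flags: Returned status flags
 *
 * Return: 0 if the firmware acknowledged the request and the output
 * parameters were filled in, else an appropriate error value.
 */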
2861 static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
2862 u8 proc_id, u64 *bv, u32 *cfg_flags,
2863 u32 *ctrl_flags, u32 *sts_flags)
2864 {
2865 struct ti_sci_msg_resp_get_status *resp;
2866 struct ti_sci_msg_req_get_status *req;
2867 struct ti_sci_info *info;
2868 struct ti_sci_xfer *xfer;
2869 struct device *dev;
2870 int ret = 0;
2871
2872 if (!handle)
2873 return -EINVAL;
2874 if (IS_ERR(handle))
2875 return PTR_ERR(handle);
2876
2877 info = handle_to_ti_sci_info(handle);
2878 dev = info->dev;
2879
2880 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
2881 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2882 sizeof(*req), sizeof(*resp));
2883 if (IS_ERR(xfer)) {
2884 ret = PTR_ERR(xfer);
2885 dev_err(dev, "Message alloc failed(%d)\n", ret);
2886 return ret;
2887 }
2888 req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
2889 req->processor_id = proc_id;
2890
2891 ret = ti_sci_do_xfer(info, xfer);
2892 if (ret) {
2893 dev_err(dev, "Mbox send fail %d\n", ret);
2894 goto fail;
2895 }
2896
2897 resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf;
2898
2899 if (!ti_sci_is_response_ack(resp)) {
2900 ret = -ENODEV;
2901 } else {
2902 *bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
2903 (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
2904 TI_SCI_ADDR_HIGH_MASK);
2905 *cfg_flags = resp->config_flags;
2906 *ctrl_flags = resp->control_flags;
2907 *sts_flags = resp->status_flags;
2908 }
2909
2910 fail:
2911 ti_sci_put_one_xfer(&info->minfo, xfer);
2912
2913 return ret;
2914 }
2915
2916
2917
2918
2919
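/*
 * ti_sci_setup_ops() - Fill in the operation tables of the exported handle
 * @info: Pointer to the TI SCI instance being set up
 *
 * Wires every ops structure in info->handle.ops to the command
 * implementations above, so client drivers only ever see the handle.
 */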
2920 static void ti_sci_setup_ops(struct ti_sci_info *info)
2921 {
2922 struct ti_sci_ops *ops = &info->handle.ops;
2923 struct ti_sci_core_ops *core_ops = &ops->core_ops;
2924 struct ti_sci_dev_ops *dops = &ops->dev_ops;
2925 struct ti_sci_clk_ops *cops = &ops->clk_ops;
2926 struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
2927 struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
2928 struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2929 struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2930 struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
2931 struct ti_sci_proc_ops *pops = &ops->proc_ops;
2932
2933 core_ops->reboot_device = ti_sci_cmd_core_reboot;
2934
2935 dops->get_device = ti_sci_cmd_get_device;
2936 dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
2937 dops->idle_device = ti_sci_cmd_idle_device;
2938 dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
2939 dops->put_device = ti_sci_cmd_put_device;
2940
2941 dops->is_valid = ti_sci_cmd_dev_is_valid;
2942 dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
2943 dops->is_idle = ti_sci_cmd_dev_is_idle;
2944 dops->is_stop = ti_sci_cmd_dev_is_stop;
2945 dops->is_on = ti_sci_cmd_dev_is_on;
2946 dops->is_transitioning = ti_sci_cmd_dev_is_trans;
2947 dops->set_device_resets = ti_sci_cmd_set_device_resets;
2948 dops->get_device_resets = ti_sci_cmd_get_device_resets;
2949
2950 cops->get_clock = ti_sci_cmd_get_clock;
2951 cops->idle_clock = ti_sci_cmd_idle_clock;
2952 cops->put_clock = ti_sci_cmd_put_clock;
2953 cops->is_auto = ti_sci_cmd_clk_is_auto;
2954 cops->is_on = ti_sci_cmd_clk_is_on;
2955 cops->is_off = ti_sci_cmd_clk_is_off;
2956
2957 cops->set_parent = ti_sci_cmd_clk_set_parent;
2958 cops->get_parent = ti_sci_cmd_clk_get_parent;
2959 cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
2960
2961 cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
2962 cops->set_freq = ti_sci_cmd_clk_set_freq;
2963 cops->get_freq = ti_sci_cmd_clk_get_freq;
2964
2965 rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
2966 rm_core_ops->get_range_from_shost =
2967 ti_sci_cmd_get_resource_range_from_shost;
2968
2969 iops->set_irq = ti_sci_cmd_set_irq;
2970 iops->set_event_map = ti_sci_cmd_set_event_map;
2971 iops->free_irq = ti_sci_cmd_free_irq;
2972 iops->free_event_map = ti_sci_cmd_free_event_map;
2973
2974 rops->config = ti_sci_cmd_ring_config;
2975 rops->get_config = ti_sci_cmd_ring_get_config;
2976
2977 psilops->pair = ti_sci_cmd_rm_psil_pair;
2978 psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2979
2980 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2981 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2982 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
2983
2984 pops->request = ti_sci_cmd_proc_request;
2985 pops->release = ti_sci_cmd_proc_release;
2986 pops->handover = ti_sci_cmd_proc_handover;
2987 pops->set_config = ti_sci_cmd_proc_set_config;
2988 pops->set_control = ti_sci_cmd_proc_set_control;
2989 pops->get_status = ti_sci_cmd_proc_get_status;
2990 }
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
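/**
 * ti_sci_get_handle() - Get handle to a TI SCI instance
 * @dev: Pointer to a device whose parent DT node is the TISCI node
 *
 * NOTE: This function does not track individual clients of the framework;
 * each successful ti_sci_get_handle() must be balanced by a
 * ti_sci_put_handle().
 *
 * Return: pointer to the handle if successful, else:
 * -EINVAL if no device or OF information is available,
 * -EPROBE_DEFER if the matching TISCI instance has not probed yet.
 */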
3004 const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
3005 {
3006 struct device_node *ti_sci_np;
3007 struct list_head *p;
3008 struct ti_sci_handle *handle = NULL;
3009 struct ti_sci_info *info;
3010
3011 if (!dev) {
3012 pr_err("I need a device pointer\n");
3013 return ERR_PTR(-EINVAL);
3014 }
3015 ti_sci_np = of_get_parent(dev->of_node);
3016 if (!ti_sci_np) {
3017 dev_err(dev, "No OF information\n");
3018 return ERR_PTR(-EINVAL);
3019 }
3020
3021 mutex_lock(&ti_sci_list_mutex);
3022 list_for_each(p, &ti_sci_list) {
3023 info = list_entry(p, struct ti_sci_info, node);
3024 if (ti_sci_np == info->dev->of_node) {
3025 handle = &info->handle;
3026 info->users++;
3027 break;
3028 }
3029 }
3030 mutex_unlock(&ti_sci_list_mutex);
3031 of_node_put(ti_sci_np);
3032
3033 if (!handle)
3034 return ERR_PTR(-EPROBE_DEFER);
3035
3036 return handle;
3037 }
3038 EXPORT_SYMBOL_GPL(ti_sci_get_handle);
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
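/**
 * ti_sci_put_handle() - Release a handle acquired by ti_sci_get_handle
 * @handle: Handle acquired by ti_sci_get_handle
 *
 * Return: 0 on success; the encoded error if an error pointer was passed in,
 * or -EINVAL if the handle is NULL.
 */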
3052 int ti_sci_put_handle(const struct ti_sci_handle *handle)
3053 {
3054 struct ti_sci_info *info;
3055
3056 if (IS_ERR(handle))
3057 return PTR_ERR(handle);
3058 if (!handle)
3059 return -EINVAL;
3060
3061 info = handle_to_ti_sci_info(handle);
3062 mutex_lock(&ti_sci_list_mutex);
3063 if (!WARN_ON(!info->users))
3064 info->users--;
3065 mutex_unlock(&ti_sci_list_mutex);
3066
3067 return 0;
3068 }
3069 EXPORT_SYMBOL_GPL(ti_sci_put_handle);
3070
3071 static void devm_ti_sci_release(struct device *dev, void *res)
3072 {
3073 const struct ti_sci_handle **ptr = res;
3074 const struct ti_sci_handle *handle = *ptr;
3075 int ret;
3076
3077 ret = ti_sci_put_handle(handle);
3078 if (ret)
3079 dev_err(dev, "failed to put handle %d\n", ret);
3080 }
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
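/**
 * devm_ti_sci_get_handle() - Managed variant of ti_sci_get_handle
 * @dev: Device requesting the TISCI handle
 *
 * The handle is dropped automatically via devres when @dev is unbound, so it
 * must not be released with ti_sci_put_handle().
 *
 * Return: pointer to the handle if successful, else a corresponding error
 * pointer.
 */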
3093 const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
3094 {
3095 const struct ti_sci_handle **ptr;
3096 const struct ti_sci_handle *handle;
3097
3098 ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
3099 if (!ptr)
3100 return ERR_PTR(-ENOMEM);
3101 handle = ti_sci_get_handle(dev);
3102
3103 if (!IS_ERR(handle)) {
3104 *ptr = handle;
3105 devres_add(dev, ptr);
3106 } else {
3107 devres_free(ptr);
3108 }
3109
3110 return handle;
3111 }
3112 EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
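/**
 * ti_sci_get_by_phandle() - Get the TI SCI handle via a DT phandle
 * @np: Device node carrying the phandle property
 * @property: Name of the property that references the TISCI node
 *
 * Each successful call must be balanced by a ti_sci_put_handle().
 *
 * Return: pointer to the handle if successful, else:
 * -EINVAL if no node pointer is supplied,
 * -ENODEV if the phandle cannot be resolved,
 * -EPROBE_DEFER if the matching TISCI instance has not probed yet.
 */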
3127 const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
3128 const char *property)
3129 {
3130 struct ti_sci_handle *handle = NULL;
3131 struct device_node *ti_sci_np;
3132 struct ti_sci_info *info;
3133 struct list_head *p;
3134
3135 if (!np) {
3136 pr_err("I need a device node pointer\n");
3137 return ERR_PTR(-EINVAL);
3138 }
3139
3140 ti_sci_np = of_parse_phandle(np, property, 0);
3141 if (!ti_sci_np)
3142 return ERR_PTR(-ENODEV);
3143
3144 mutex_lock(&ti_sci_list_mutex);
3145 list_for_each(p, &ti_sci_list) {
3146 info = list_entry(p, struct ti_sci_info, node);
3147 if (ti_sci_np == info->dev->of_node) {
3148 handle = &info->handle;
3149 info->users++;
3150 break;
3151 }
3152 }
3153 mutex_unlock(&ti_sci_list_mutex);
3154 of_node_put(ti_sci_np);
3155
3156 if (!handle)
3157 return ERR_PTR(-EPROBE_DEFER);
3158
3159 return handle;
3160 }
3161 EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
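/**
 * devm_ti_sci_get_by_phandle() - Managed variant of ti_sci_get_by_phandle
 * @dev: Device requesting the TISCI handle
 * @property: Name of the property that references the TISCI node
 *
 * The handle is dropped automatically via devres when @dev is unbound, so it
 * must not be released with ti_sci_put_handle().
 *
 * Illustrative client-side sketch only; the "ti,sci" property name, the probe
 * function and FOO_TISCI_DEV_ID are hypothetical and not defined here:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		const struct ti_sci_handle *sci;
 *
 *		sci = devm_ti_sci_get_by_phandle(&pdev->dev, "ti,sci");
 *		if (IS_ERR(sci))
 *			return PTR_ERR(sci);
 *		return sci->ops.dev_ops.get_device(sci, FOO_TISCI_DEV_ID);
 *	}
 *
 * Return: pointer to the handle if successful, else a corresponding error
 * pointer.
 */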
3175 const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
3176 const char *property)
3177 {
3178 const struct ti_sci_handle *handle;
3179 const struct ti_sci_handle **ptr;
3180
3181 ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
3182 if (!ptr)
3183 return ERR_PTR(-ENOMEM);
3184 handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
3185
3186 if (!IS_ERR(handle)) {
3187 *ptr = handle;
3188 devres_add(dev, ptr);
3189 } else {
3190 devres_free(ptr);
3191 }
3192
3193 return handle;
3194 }
3195 EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
3196
3197
3198
3199
3200
3201
3202
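/**
 * ti_sci_get_free_resource() - Allocate one free slot from a TISCI resource
 * @res: Resource obtained from devm_ti_sci_get_of_resource()
 *
 * Scans every range in @res for a clear bit, marks it used and returns the
 * absolute resource index.
 *
 * Return: a valid resource index, or TI_SCI_RESOURCE_NULL if all slots in
 * all ranges are already in use.
 */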
3203 u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
3204 {
3205 unsigned long flags;
3206 u16 set, free_bit;
3207
3208 raw_spin_lock_irqsave(&res->lock, flags);
3209 for (set = 0; set < res->sets; set++) {
3210 free_bit = find_first_zero_bit(res->desc[set].res_map,
3211 res->desc[set].num);
3212 if (free_bit != res->desc[set].num) {
3213 set_bit(free_bit, res->desc[set].res_map);
3214 raw_spin_unlock_irqrestore(&res->lock, flags);
3215 return res->desc[set].start + free_bit;
3216 }
3217 }
3218 raw_spin_unlock_irqrestore(&res->lock, flags);
3219
3220 return TI_SCI_RESOURCE_NULL;
3221 }
3222 EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
3223
3224
3225
3226
3227
3228
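/**
 * ti_sci_release_resource() - Return a slot previously handed out by
 * ti_sci_get_free_resource()
 * @res: Resource the slot belongs to
 * @id: Absolute resource index to release
 */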
3229 void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
3230 {
3231 unsigned long flags;
3232 u16 set;
3233
3234 raw_spin_lock_irqsave(&res->lock, flags);
3235 for (set = 0; set < res->sets; set++) {
3236 if (res->desc[set].start <= id &&
3237 (res->desc[set].num + res->desc[set].start) > id)
3238 clear_bit(id - res->desc[set].start,
3239 res->desc[set].res_map);
3240 }
3241 raw_spin_unlock_irqrestore(&res->lock, flags);
3242 }
3243 EXPORT_SYMBOL_GPL(ti_sci_release_resource);
3244
3245
3246
3247
3248
3249
3250
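/**
 * ti_sci_get_num_resources() - Count all slots described by a TISCI resource
 * @res: Resource to count
 *
 * Return: total number of slots across every range in @res.
 */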
3251 u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
3252 {
3253 u32 set, count = 0;
3254
3255 for (set = 0; set < res->sets; set++)
3256 count += res->desc[set].num;
3257
3258 return count;
3259 }
3260 EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
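/**
 * devm_ti_sci_get_of_resource() - Build a TISCI resource from a DT property
 * @handle: TISCI handle used to query the assigned ranges
 * @dev: Device the resource (and its devres allocations) belongs to
 * @dev_id: TISCI device ID the resource subtypes refer to
 * @of_prop: DT property listing the resource subtype IDs (one u32 each)
 *
 * For every subtype listed in @of_prop the firmware is asked for the range
 * assigned to this host; ranges that are not assigned are recorded as empty.
 * A sketch of the intended pairing with the allocator above (the property
 * name and device ID are illustrative only):
 *
 *	res = devm_ti_sci_get_of_resource(sci, dev, dev_id,
 *					  "ti,sci-rm-range-vint");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	idx = ti_sci_get_free_resource(res);
 *	...
 *	ti_sci_release_resource(res, idx);
 *
 * Return: pointer to a ready-to-use ti_sci_resource, or an error pointer if
 * the property is missing or no range is valid for this host.
 */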
3272 struct ti_sci_resource *
3273 devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
3274 struct device *dev, u32 dev_id, char *of_prop)
3275 {
3276 struct ti_sci_resource *res;
3277 bool valid_set = false;
3278 u32 resource_subtype;
3279 int i, ret;
3280
3281 res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
3282 if (!res)
3283 return ERR_PTR(-ENOMEM);
3284
3285 ret = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
3286 sizeof(u32));
3287 if (ret < 0) {
3288 dev_err(dev, "%s resource type ids not available\n", of_prop);
3289 return ERR_PTR(ret);
3290 }
3291 res->sets = ret;
3292
3293 res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
3294 GFP_KERNEL);
3295 if (!res->desc)
3296 return ERR_PTR(-ENOMEM);
3297
3298 for (i = 0; i < res->sets; i++) {
3299 ret = of_property_read_u32_index(dev_of_node(dev), of_prop, i,
3300 &resource_subtype);
3301 if (ret)
3302 return ERR_PTR(-EINVAL);
3303
3304 ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
3305 resource_subtype,
3306 &res->desc[i].start,
3307 &res->desc[i].num);
3308 if (ret) {
3309 dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
3310 dev_id, resource_subtype);
3311 res->desc[i].start = 0;
3312 res->desc[i].num = 0;
3313 continue;
3314 }
3315
3316 dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
3317 dev_id, resource_subtype, res->desc[i].start,
3318 res->desc[i].num);
3319
3320 valid_set = true;
3321 res->desc[i].res_map =
3322 devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
3323 sizeof(*res->desc[i].res_map), GFP_KERNEL);
3324 if (!res->desc[i].res_map)
3325 return ERR_PTR(-ENOMEM);
3326 }
3327 raw_spin_lock_init(&res->lock);
3328
3329 if (valid_set)
3330 return res;
3331
3332 return ERR_PTR(-EINVAL);
3333 }
3334
3335 static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
3336 void *cmd)
3337 {
3338 struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
3339 const struct ti_sci_handle *handle = &info->handle;
3340
3341 ti_sci_cmd_core_reboot(handle);
3342
3343
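/* If we are still running, the reset request did not take effect; report failure to the notifier chain. */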
3344 return NOTIFY_BAD;
3345 }
3346
3347
3348 static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
3349 .default_host_id = 2,
3350
3351 .max_rx_timeout_ms = 1000,
3352
3353 .max_msgs = 20,
3354 .max_msg_size = 64,
3355 .rm_type_map = NULL,
3356 };
3357
3358 static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
3359 {.dev_id = 56, .type = 0x00b},
3360 {.dev_id = 179, .type = 0x000},
3361 {.dev_id = 187, .type = 0x009},
3362 {.dev_id = 188, .type = 0x006},
3363 {.dev_id = 194, .type = 0x007},
3364 {.dev_id = 195, .type = 0x00a},
3365 {.dev_id = 0, .type = 0x000},
3366 };
3367
3368
3369 static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
3370 .default_host_id = 12,
3371
3372 .max_rx_timeout_ms = 10000,
3373
3374 .max_msgs = 20,
3375 .max_msg_size = 60,
3376 .rm_type_map = ti_sci_am654_rm_type_map,
3377 };
3378
3379 static const struct of_device_id ti_sci_of_match[] = {
3380 {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
3381 {.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
3382 { },
3383 };
3384 MODULE_DEVICE_TABLE(of, ti_sci_of_match);
3385
3386 static int ti_sci_probe(struct platform_device *pdev)
3387 {
3388 struct device *dev = &pdev->dev;
3389 const struct of_device_id *of_id;
3390 const struct ti_sci_desc *desc;
3391 struct ti_sci_xfer *xfer;
3392 struct ti_sci_info *info = NULL;
3393 struct ti_sci_xfers_info *minfo;
3394 struct mbox_client *cl;
3395 int ret = -EINVAL;
3396 int i;
3397 int reboot = 0;
3398 u32 h_id;
3399
3400 of_id = of_match_device(ti_sci_of_match, dev);
3401 if (!of_id) {
3402 dev_err(dev, "OF data missing\n");
3403 return -EINVAL;
3404 }
3405 desc = of_id->data;
3406
3407 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
3408 if (!info)
3409 return -ENOMEM;
3410
3411 info->dev = dev;
3412 info->desc = desc;
3413 ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
3414
3415 if (ret < 0) {
3416 info->host_id = info->desc->default_host_id;
3417 } else {
3418 if (!h_id) {
3419 dev_warn(dev, "Host ID 0 is reserved for firmware\n");
3420 info->host_id = info->desc->default_host_id;
3421 } else {
3422 info->host_id = h_id;
3423 }
3424 }
3425
3426 reboot = of_property_read_bool(dev->of_node,
3427 "ti,system-reboot-controller");
3428 INIT_LIST_HEAD(&info->node);
3429 minfo = &info->minfo;
3430
3431
3432
3433
3434
3435
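/*
 * Transfers are matched back to their slot by the sequence number in the
 * message header, which is a single byte, so never allow more pre-allocated
 * messages than hdr.seq can encode.
 */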
3436 if (WARN_ON(desc->max_msgs >=
3437 1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
3438 return -EINVAL;
3439
3440 minfo->xfer_block = devm_kcalloc(dev,
3441 desc->max_msgs,
3442 sizeof(*minfo->xfer_block),
3443 GFP_KERNEL);
3444 if (!minfo->xfer_block)
3445 return -ENOMEM;
3446
3447 minfo->xfer_alloc_table = devm_kcalloc(dev,
3448 BITS_TO_LONGS(desc->max_msgs),
3449 sizeof(unsigned long),
3450 GFP_KERNEL);
3451 if (!minfo->xfer_alloc_table)
3452 return -ENOMEM;
3453 bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);
3454
3455
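/* Pre-initialize each transfer with its pre-allocated message buffer. */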
3456 for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
3457 xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
3458 GFP_KERNEL);
3459 if (!xfer->xfer_buf)
3460 return -ENOMEM;
3461
3462 xfer->tx_message.buf = xfer->xfer_buf;
3463 init_completion(&xfer->done);
3464 }
3465
3466 ret = ti_sci_debugfs_create(pdev, info);
3467 if (ret)
3468 dev_warn(dev, "Failed to create debug file\n");
3469
3470 platform_set_drvdata(pdev, info);
3471
3472 cl = &info->cl;
3473 cl->dev = dev;
3474 cl->tx_block = false;
3475 cl->rx_callback = ti_sci_rx_callback;
3476 cl->knows_txdone = true;
3477
3478 spin_lock_init(&minfo->xfer_lock);
3479 sema_init(&minfo->sem_xfer_count, desc->max_msgs);
3480
3481 info->chan_rx = mbox_request_channel_byname(cl, "rx");
3482 if (IS_ERR(info->chan_rx)) {
3483 ret = PTR_ERR(info->chan_rx);
3484 goto out;
3485 }
3486
3487 info->chan_tx = mbox_request_channel_byname(cl, "tx");
3488 if (IS_ERR(info->chan_tx)) {
3489 ret = PTR_ERR(info->chan_tx);
3490 goto out;
3491 }
3492 ret = ti_sci_cmd_get_revision(info);
3493 if (ret) {
3494 dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
3495 goto out;
3496 }
3497
3498 ti_sci_setup_ops(info);
3499
3500 if (reboot) {
3501 info->nb.notifier_call = tisci_reboot_handler;
3502 info->nb.priority = 128;
3503
3504 ret = register_restart_handler(&info->nb);
3505 if (ret) {
3506 dev_err(dev, "reboot registration fail(%d)\n", ret);
3507 goto out;
3508 }
3509 }
3510
3511 dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
3512 info->handle.version.abi_major, info->handle.version.abi_minor,
3513 info->handle.version.firmware_revision,
3514 info->handle.version.firmware_description);
3515
3516 mutex_lock(&ti_sci_list_mutex);
3517 list_add_tail(&info->node, &ti_sci_list);
3518 mutex_unlock(&ti_sci_list_mutex);
3519
3520 return of_platform_populate(dev->of_node, NULL, NULL, dev);
3521 out:
3522 if (!IS_ERR(info->chan_tx))
3523 mbox_free_channel(info->chan_tx);
3524 if (!IS_ERR(info->chan_rx))
3525 mbox_free_channel(info->chan_rx);
3526 debugfs_remove(info->d);
3527 return ret;
3528 }
3529
3530 static int ti_sci_remove(struct platform_device *pdev)
3531 {
3532 struct ti_sci_info *info;
3533 struct device *dev = &pdev->dev;
3534 int ret = 0;
3535
3536 of_platform_depopulate(dev);
3537
3538 info = platform_get_drvdata(pdev);
3539
3540 if (info->nb.notifier_call)
3541 unregister_restart_handler(&info->nb);
3542
3543 mutex_lock(&ti_sci_list_mutex);
3544 if (info->users)
3545 ret = -EBUSY;
3546 else
3547 list_del(&info->node);
3548 mutex_unlock(&ti_sci_list_mutex);
3549
3550 if (!ret) {
3551 ti_sci_debugfs_destroy(pdev, info);
3552
3553
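/* Safe to free the mailbox channels: no users are left at this point. */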
3554 mbox_free_channel(info->chan_tx);
3555 mbox_free_channel(info->chan_rx);
3556 }
3557
3558 return ret;
3559 }
3560
3561 static struct platform_driver ti_sci_driver = {
3562 .probe = ti_sci_probe,
3563 .remove = ti_sci_remove,
3564 .driver = {
3565 .name = "ti-sci",
3566 .of_match_table = of_match_ptr(ti_sci_of_match),
3567 },
3568 };
3569 module_platform_driver(ti_sci_driver);
3570
3571 MODULE_LICENSE("GPL v2");
3572 MODULE_DESCRIPTION("TI System Control Interface (SCI) driver");
3573 MODULE_AUTHOR("Nishanth Menon");
3574 MODULE_ALIAS("platform:ti-sci");