This source file includes the following definitions.
- am33xx_txev_eoi
- am33xx_txev_enable
- wkup_m3_ctrl_ipc_write
- wkup_m3_ctrl_ipc_read
- wkup_m3_fw_version_read
- wkup_m3_txev_handler
- wkup_m3_ping
- wkup_m3_ping_noirq
- wkup_m3_is_available
- wkup_m3_set_mem_type
- wkup_m3_set_resume_address
- wkup_m3_request_pm_status
- wkup_m3_prepare_low_power
- wkup_m3_finish_low_power
- wkup_m3_request_wake_src
- wkup_m3_set_rtc_only
- wkup_m3_ipc_get
- wkup_m3_ipc_put
- wkup_m3_rproc_boot_thread
- wkup_m3_ipc_probe
- wkup_m3_ipc_remove
- wkup_m3_ipc_suspend
- wkup_m3_ipc_resume
/*
 * AMx3 Wkup M3 IPC driver
 *
 * Copyright (C) 2015 Texas Instruments, Inc.
 *
 * Dave Gerlach <d-gerlach@ti.com>
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/suspend.h>
#include <linux/wkup_m3_ipc.h>

#define AM33XX_CTRL_IPC_REG_COUNT	0x8
#define AM33XX_CTRL_IPC_REG_OFFSET(m)	(0x4 + 4 * (m))

#define AM33XX_CONTROL_M3_TXEV_EOI	0x00

#define AM33XX_M3_TXEV_ACK		(0x1 << 0)
#define AM33XX_M3_TXEV_ENABLE		(0x0 << 0)

#define IPC_CMD_DS0			0x4
#define IPC_CMD_STANDBY			0xc
#define IPC_CMD_IDLE			0x10
#define IPC_CMD_RESET			0xe
#define DS_IPC_DEFAULT			0xffffffff
#define M3_VERSION_UNKNOWN		0x0000ffff
#define M3_BASELINE_VERSION		0x191
#define M3_STATUS_RESP_MASK		(0xffff << 16)
#define M3_FW_VERSION_MASK		0xffff
#define M3_WAKE_SRC_MASK		0xff

#define M3_STATE_UNKNOWN		0
#define M3_STATE_RESET			1
#define M3_STATE_INITED			2
#define M3_STATE_MSG_FOR_LP		3
#define M3_STATE_MSG_FOR_RESET		4

static struct wkup_m3_ipc *m3_ipc_state;

static const struct wkup_m3_wakeup_src wakeups[] = {
	{.irq_nr = 16, .src = "PRCM"},
	{.irq_nr = 35, .src = "USB0_PHY"},
	{.irq_nr = 36, .src = "USB1_PHY"},
	{.irq_nr = 40, .src = "I2C0"},
	{.irq_nr = 41, .src = "RTC Timer"},
	{.irq_nr = 42, .src = "RTC Alarm"},
	{.irq_nr = 43, .src = "Timer0"},
	{.irq_nr = 44, .src = "Timer1"},
	{.irq_nr = 45, .src = "UART"},
	{.irq_nr = 46, .src = "GPIO0"},
	{.irq_nr = 48, .src = "MPU_WAKE"},
	{.irq_nr = 49, .src = "WDT0"},
	{.irq_nr = 50, .src = "WDT1"},
	{.irq_nr = 51, .src = "ADC_TSC"},
	{.irq_nr = 0, .src = "Unknown"},
};

static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
{
	writel(AM33XX_M3_TXEV_ACK,
	       m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
}

static void am33xx_txev_enable(struct wkup_m3_ipc *m3_ipc)
{
	writel(AM33XX_M3_TXEV_ENABLE,
	       m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
}

static void wkup_m3_ctrl_ipc_write(struct wkup_m3_ipc *m3_ipc,
				   u32 val, int ipc_reg_num)
{
	if (WARN(ipc_reg_num < 0 || ipc_reg_num > AM33XX_CTRL_IPC_REG_COUNT,
		 "ipc register operation out of range"))
		return;

	writel(val, m3_ipc->ipc_mem_base +
	       AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
}

static unsigned int wkup_m3_ctrl_ipc_read(struct wkup_m3_ipc *m3_ipc,
					  int ipc_reg_num)
{
	if (WARN(ipc_reg_num < 0 || ipc_reg_num > AM33XX_CTRL_IPC_REG_COUNT,
		 "ipc register operation out of range"))
		return 0;

	return readl(m3_ipc->ipc_mem_base +
		     AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
}

static int wkup_m3_fw_version_read(struct wkup_m3_ipc *m3_ipc)
{
	int val;

	val = wkup_m3_ctrl_ipc_read(m3_ipc, 2);

	return val & M3_FW_VERSION_MASK;
}

static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
{
	struct wkup_m3_ipc *m3_ipc = ipc_data;
	struct device *dev = m3_ipc->dev;
	int ver = 0;

	am33xx_txev_eoi(m3_ipc);

	switch (m3_ipc->state) {
	case M3_STATE_RESET:
		ver = wkup_m3_fw_version_read(m3_ipc);

		if (ver == M3_VERSION_UNKNOWN ||
		    ver < M3_BASELINE_VERSION) {
			dev_warn(dev, "CM3 Firmware Version %x not supported\n",
				 ver);
		} else {
			dev_info(dev, "CM3 Firmware Version = 0x%x\n", ver);
		}

		m3_ipc->state = M3_STATE_INITED;
		complete(&m3_ipc->sync_complete);
		break;
	case M3_STATE_MSG_FOR_RESET:
		m3_ipc->state = M3_STATE_INITED;
		complete(&m3_ipc->sync_complete);
		break;
	case M3_STATE_MSG_FOR_LP:
		complete(&m3_ipc->sync_complete);
		break;
	case M3_STATE_UNKNOWN:
		dev_warn(dev, "Unknown CM3 State\n");
	}

	am33xx_txev_enable(m3_ipc);

	return IRQ_HANDLED;
}

static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
{
	struct device *dev = m3_ipc->dev;
	mbox_msg_t dummy_msg = 0;
	int ret;

	if (!m3_ipc->mbox) {
		dev_err(dev,
			"No IPC channel to communicate with wkup_m3!\n");
		return -EIO;
	}

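	/*
	 * Send a dummy message to the mailbox to raise the interrupt to the
	 * CM3, signalling that new data is available in the IPC registers,
	 * then wait for the CM3 to acknowledge via the TXEV interrupt.
	 */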
	ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
	if (ret < 0) {
		dev_err(dev, "%s: mbox_send_message() failed: %d\n",
			__func__, ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&m3_ipc->sync_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(dev, "MPU<->CM3 sync failure\n");
		m3_ipc->state = M3_STATE_UNKNOWN;
		return -EIO;
	}

	mbox_client_txdone(m3_ipc->mbox, 0);
	return 0;
}

static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
{
	struct device *dev = m3_ipc->dev;
	mbox_msg_t dummy_msg = 0;
	int ret;

	if (!m3_ipc->mbox) {
		dev_err(dev,
			"No IPC channel to communicate with wkup_m3!\n");
		return -EIO;
	}

	ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
	if (ret < 0) {
		dev_err(dev, "%s: mbox_send_message() failed: %d\n",
			__func__, ret);
		return ret;
	}

	mbox_client_txdone(m3_ipc->mbox, 0);
	return 0;
}

static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
{
	return ((m3_ipc->state != M3_STATE_RESET) &&
		(m3_ipc->state != M3_STATE_UNKNOWN));
}

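/**
 * wkup_m3_set_mem_type - Pass the memory type in use to the wkup_m3
 * @m3_ipc: Pointer to wkup_m3_ipc context
 * @mem_type: memory type value to be passed to the CM3 at suspend time
 */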
static void wkup_m3_set_mem_type(struct wkup_m3_ipc *m3_ipc, int mem_type)
{
	m3_ipc->mem_type = mem_type;
}

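/**
 * wkup_m3_set_resume_address - Pass the resume address to the wkup_m3
 * @m3_ipc: Pointer to wkup_m3_ipc context
 * @addr: Physical address from which resume code should execute
 */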
static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
{
	m3_ipc->resume_addr = (unsigned long)addr;
}

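/**
 * wkup_m3_request_pm_status - Retrieve the wkup_m3 status code after suspend
 * @m3_ipc: Pointer to wkup_m3_ipc context
 *
 * Returns the status code the CM3 reported for the last low power
 * transition, read back from the IPC registers.
 */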
static int wkup_m3_request_pm_status(struct wkup_m3_ipc *m3_ipc)
{
	unsigned int i;
	int val;

	val = wkup_m3_ctrl_ipc_read(m3_ipc, 1);

	i = M3_STATUS_RESP_MASK & val;
	i >>= __ffs(M3_STATUS_RESP_MASK);

	return i;
}

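/**
 * wkup_m3_prepare_low_power - Request preparation for a low power transition
 * @m3_ipc: Pointer to wkup_m3_ipc context
 * @state: Low power state to enter: WKUP_M3_DEEPSLEEP, WKUP_M3_STANDBY
 *	   or WKUP_M3_IDLE
 *
 * Returns 0 if the request was sent successfully, otherwise a nonzero value.
 */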
static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
{
	struct device *dev = m3_ipc->dev;
	int m3_power_state;
	int ret = 0;

	if (!wkup_m3_is_available(m3_ipc))
		return -ENODEV;

	switch (state) {
	case WKUP_M3_DEEPSLEEP:
		m3_power_state = IPC_CMD_DS0;
		break;
	case WKUP_M3_STANDBY:
		m3_power_state = IPC_CMD_STANDBY;
		break;
	case WKUP_M3_IDLE:
		m3_power_state = IPC_CMD_IDLE;
		break;
	default:
		return 1;
	}

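	/* Program the required IPC registers, then fill the rest with defaults */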
	wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->resume_addr, 0);
	wkup_m3_ctrl_ipc_write(m3_ipc, m3_power_state, 1);
	wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type, 4);

	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 3);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 6);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 7);

	m3_ipc->state = M3_STATE_MSG_FOR_LP;

	if (state == WKUP_M3_IDLE)
		ret = wkup_m3_ping_noirq(m3_ipc);
	else
		ret = wkup_m3_ping(m3_ipc);

	if (ret) {
		dev_err(dev, "Unable to ping CM3\n");
		return ret;
	}

	return 0;
}

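/**
 * wkup_m3_finish_low_power - Return the CM3 to its reset state
 * @m3_ipc: Pointer to wkup_m3_ipc context
 *
 * Returns 0 if the reset request was sent successfully, otherwise an
 * error code.
 */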
static int wkup_m3_finish_low_power(struct wkup_m3_ipc *m3_ipc)
{
	struct device *dev = m3_ipc->dev;
	int ret = 0;

	if (!wkup_m3_is_available(m3_ipc))
		return -ENODEV;

	wkup_m3_ctrl_ipc_write(m3_ipc, IPC_CMD_RESET, 1);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);

	m3_ipc->state = M3_STATE_MSG_FOR_RESET;

	ret = wkup_m3_ping(m3_ipc);
	if (ret) {
		dev_err(dev, "Unable to ping CM3\n");
		return ret;
	}

	return 0;
}

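/**
 * wkup_m3_request_wake_src - Get the name of the wakeup source from the wkup_m3
 * @m3_ipc: Pointer to wkup_m3_ipc context
 *
 * Returns a string describing the wakeup source, or "Unknown" if the
 * interrupt number reported by the CM3 does not match a known source.
 */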
static const char *wkup_m3_request_wake_src(struct wkup_m3_ipc *m3_ipc)
{
	unsigned int wakeup_src_idx;
	int j, val;

	val = wkup_m3_ctrl_ipc_read(m3_ipc, 6);

	wakeup_src_idx = val & M3_WAKE_SRC_MASK;

	for (j = 0; j < ARRAY_SIZE(wakeups) - 1; j++) {
		if (wakeups[j].irq_nr == wakeup_src_idx)
			return wakeups[j].src;
	}
	return wakeups[j].src;
}

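/**
 * wkup_m3_set_rtc_only - Set the rtc_only flag
 * @m3_ipc: Pointer to wkup_m3_ipc context
 */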
static void wkup_m3_set_rtc_only(struct wkup_m3_ipc *m3_ipc)
{
	if (m3_ipc_state)
		m3_ipc_state->is_rtc_only = true;
}

static struct wkup_m3_ipc_ops ipc_ops = {
	.set_mem_type = wkup_m3_set_mem_type,
	.set_resume_address = wkup_m3_set_resume_address,
	.prepare_low_power = wkup_m3_prepare_low_power,
	.finish_low_power = wkup_m3_finish_low_power,
	.request_pm_status = wkup_m3_request_pm_status,
	.request_wake_src = wkup_m3_request_wake_src,
	.set_rtc_only = wkup_m3_set_rtc_only,
};

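/**
 * wkup_m3_ipc_get - Return a handle to the wkup_m3_ipc context
 *
 * Returns NULL if the wkup_m3 is not yet available, otherwise returns a
 * pointer to the wkup_m3_ipc struct with its device reference count raised.
 */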
struct wkup_m3_ipc *wkup_m3_ipc_get(void)
{
	if (m3_ipc_state)
		get_device(m3_ipc_state->dev);
	else
		return NULL;

	return m3_ipc_state;
}
EXPORT_SYMBOL_GPL(wkup_m3_ipc_get);

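/**
 * wkup_m3_ipc_put - Release a handle obtained from wkup_m3_ipc_get
 * @m3_ipc: Pointer to the wkup_m3_ipc struct returned by wkup_m3_ipc_get
 */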
void wkup_m3_ipc_put(struct wkup_m3_ipc *m3_ipc)
{
	if (m3_ipc_state)
		put_device(m3_ipc_state->dev);
}
EXPORT_SYMBOL_GPL(wkup_m3_ipc_put);

static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc)
{
	struct device *dev = m3_ipc->dev;
	int ret;

	init_completion(&m3_ipc->sync_complete);

	ret = rproc_boot(m3_ipc->rproc);
	if (ret)
		dev_err(dev, "rproc_boot failed\n");
	else
		m3_ipc_state = m3_ipc;

	do_exit(0);
}

static int wkup_m3_ipc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int irq, ret;
	phandle rproc_phandle;
	struct rproc *m3_rproc;
	struct resource *res;
	struct task_struct *task;
	struct wkup_m3_ipc *m3_ipc;

	m3_ipc = devm_kzalloc(dev, sizeof(*m3_ipc), GFP_KERNEL);
	if (!m3_ipc)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	m3_ipc->ipc_mem_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(m3_ipc->ipc_mem_base)) {
		dev_err(dev, "could not ioremap ipc_mem\n");
		return PTR_ERR(m3_ipc->ipc_mem_base);
	}
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no irq resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, wkup_m3_txev_handler,
			       0, "wkup_m3_txev", m3_ipc);
	if (ret) {
		dev_err(dev, "request_irq failed\n");
		return ret;
	}

	m3_ipc->mbox_client.dev = dev;
	m3_ipc->mbox_client.tx_done = NULL;
	m3_ipc->mbox_client.tx_prepare = NULL;
	m3_ipc->mbox_client.rx_callback = NULL;
	m3_ipc->mbox_client.tx_block = false;
	m3_ipc->mbox_client.knows_txdone = false;

	m3_ipc->mbox = mbox_request_channel(&m3_ipc->mbox_client, 0);

	if (IS_ERR(m3_ipc->mbox)) {
		dev_err(dev, "IPC Request for A8->M3 Channel failed! %ld\n",
			PTR_ERR(m3_ipc->mbox));
		return PTR_ERR(m3_ipc->mbox);
	}

	if (of_property_read_u32(dev->of_node, "ti,rproc", &rproc_phandle)) {
		dev_err(&pdev->dev, "could not get rproc phandle\n");
		ret = -ENODEV;
		goto err_free_mbox;
	}

	m3_rproc = rproc_get_by_phandle(rproc_phandle);
	if (!m3_rproc) {
		dev_err(&pdev->dev, "could not get rproc handle\n");
		ret = -EPROBE_DEFER;
		goto err_free_mbox;
	}

	m3_ipc->rproc = m3_rproc;
	m3_ipc->dev = dev;
	m3_ipc->state = M3_STATE_RESET;

	m3_ipc->ops = &ipc_ops;

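	/*
	 * Boot the wkup_m3 remote processor from a separate thread so that
	 * firmware loading does not hold up the rest of kernel boot.
	 */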
	task = kthread_run((void *)wkup_m3_rproc_boot_thread, m3_ipc,
			   "wkup_m3_rproc_loader");

	if (IS_ERR(task)) {
		dev_err(dev, "can't create rproc_boot thread\n");
		ret = PTR_ERR(task);
		goto err_put_rproc;
	}

	return 0;

err_put_rproc:
	rproc_put(m3_rproc);
err_free_mbox:
	mbox_free_channel(m3_ipc->mbox);
	return ret;
}

static int wkup_m3_ipc_remove(struct platform_device *pdev)
{
	mbox_free_channel(m3_ipc_state->mbox);

	rproc_shutdown(m3_ipc_state->rproc);
	rproc_put(m3_ipc_state->rproc);

	m3_ipc_state = NULL;

	return 0;
}

static int __maybe_unused wkup_m3_ipc_suspend(struct device *dev)
{
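	/* Nothing to do on suspend; any RTC-only handling happens in resume */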
	return 0;
}

static int __maybe_unused wkup_m3_ipc_resume(struct device *dev)
{
	if (m3_ipc_state->is_rtc_only) {
		rproc_shutdown(m3_ipc_state->rproc);
		rproc_boot(m3_ipc_state->rproc);
	}

	m3_ipc_state->is_rtc_only = false;

	return 0;
}

static const struct dev_pm_ops wkup_m3_ipc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(wkup_m3_ipc_suspend, wkup_m3_ipc_resume)
};

static const struct of_device_id wkup_m3_ipc_of_match[] = {
	{ .compatible = "ti,am3352-wkup-m3-ipc", },
	{ .compatible = "ti,am4372-wkup-m3-ipc", },
	{},
};
MODULE_DEVICE_TABLE(of, wkup_m3_ipc_of_match);

static struct platform_driver wkup_m3_ipc_driver = {
	.probe = wkup_m3_ipc_probe,
	.remove = wkup_m3_ipc_remove,
	.driver = {
		.name = "wkup_m3_ipc",
		.of_match_table = wkup_m3_ipc_of_match,
		.pm = &wkup_m3_ipc_pm_ops,
	},
};

module_platform_driver(wkup_m3_ipc_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("wkup m3 remote processor ipc driver");
MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");