m3_ipc 57 drivers/soc/ti/pm33xx.c static struct wkup_m3_ipc *m3_ipc;
m3_ipc 186 drivers/soc/ti/pm33xx.c m3_ipc->ops->set_rtc_only(m3_ipc);
m3_ipc 197 drivers/soc/ti/pm33xx.c i = m3_ipc->ops->request_pm_status(m3_ipc);
m3_ipc 221 drivers/soc/ti/pm33xx.c m3_ipc->ops->request_wake_src(m3_ipc));
m3_ipc 265 drivers/soc/ti/pm33xx.c ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
m3_ipc 268 drivers/soc/ti/pm33xx.c ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_STANDBY);
m3_ipc 284 drivers/soc/ti/pm33xx.c m3_ipc->ops->finish_low_power(m3_ipc);
m3_ipc 335 drivers/soc/ti/pm33xx.c m3_ipc->ops->set_mem_type(m3_ipc, temp);
m3_ipc 341 drivers/soc/ti/pm33xx.c m3_ipc->ops->set_resume_address(m3_ipc, (void *)resume_address);
m3_ipc 469 drivers/soc/ti/pm33xx.c m3_ipc = wkup_m3_ipc_get();
m3_ipc 470 drivers/soc/ti/pm33xx.c if (!m3_ipc) {
m3_ipc 516 drivers/soc/ti/pm33xx.c wkup_m3_ipc_put(m3_ipc);
m3_ipc 526 drivers/soc/ti/pm33xx.c wkup_m3_ipc_put(m3_ipc);
m3_ipc 69 drivers/soc/ti/wkup_m3_ipc.c static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
m3_ipc 72 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
m3_ipc 75 drivers/soc/ti/wkup_m3_ipc.c static void am33xx_txev_enable(struct wkup_m3_ipc *m3_ipc)
m3_ipc 78 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
m3_ipc 81 drivers/soc/ti/wkup_m3_ipc.c static void wkup_m3_ctrl_ipc_write(struct wkup_m3_ipc *m3_ipc,
m3_ipc 88 drivers/soc/ti/wkup_m3_ipc.c writel(val, m3_ipc->ipc_mem_base +
m3_ipc 92 drivers/soc/ti/wkup_m3_ipc.c static unsigned int wkup_m3_ctrl_ipc_read(struct wkup_m3_ipc *m3_ipc,
m3_ipc 99 drivers/soc/ti/wkup_m3_ipc.c return readl(m3_ipc->ipc_mem_base +
m3_ipc 103 drivers/soc/ti/wkup_m3_ipc.c static int wkup_m3_fw_version_read(struct wkup_m3_ipc *m3_ipc)
m3_ipc 107 drivers/soc/ti/wkup_m3_ipc.c val = wkup_m3_ctrl_ipc_read(m3_ipc, 2);
m3_ipc 114 drivers/soc/ti/wkup_m3_ipc.c struct wkup_m3_ipc *m3_ipc = ipc_data;
m3_ipc 115 drivers/soc/ti/wkup_m3_ipc.c struct device *dev = m3_ipc->dev;
m3_ipc 118 drivers/soc/ti/wkup_m3_ipc.c am33xx_txev_eoi(m3_ipc);
m3_ipc 120 drivers/soc/ti/wkup_m3_ipc.c switch (m3_ipc->state) {
m3_ipc 122 drivers/soc/ti/wkup_m3_ipc.c ver = wkup_m3_fw_version_read(m3_ipc);
m3_ipc 132 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->state = M3_STATE_INITED;
m3_ipc 133 drivers/soc/ti/wkup_m3_ipc.c complete(&m3_ipc->sync_complete);
m3_ipc 136 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->state = M3_STATE_INITED;
m3_ipc 137 drivers/soc/ti/wkup_m3_ipc.c complete(&m3_ipc->sync_complete);
m3_ipc 140 drivers/soc/ti/wkup_m3_ipc.c complete(&m3_ipc->sync_complete);
m3_ipc 146 drivers/soc/ti/wkup_m3_ipc.c am33xx_txev_enable(m3_ipc);
m3_ipc 151 drivers/soc/ti/wkup_m3_ipc.c static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
m3_ipc 153 drivers/soc/ti/wkup_m3_ipc.c struct device *dev = m3_ipc->dev;
m3_ipc 157 drivers/soc/ti/wkup_m3_ipc.c if (!m3_ipc->mbox) {
m3_ipc 170 drivers/soc/ti/wkup_m3_ipc.c ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
m3_ipc 177 drivers/soc/ti/wkup_m3_ipc.c ret = wait_for_completion_timeout(&m3_ipc->sync_complete,
m3_ipc 181 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->state = M3_STATE_UNKNOWN;
m3_ipc 185 drivers/soc/ti/wkup_m3_ipc.c mbox_client_txdone(m3_ipc->mbox, 0);
m3_ipc 189 drivers/soc/ti/wkup_m3_ipc.c static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
m3_ipc 191 drivers/soc/ti/wkup_m3_ipc.c struct device *dev = m3_ipc->dev;
m3_ipc 195 drivers/soc/ti/wkup_m3_ipc.c if (!m3_ipc->mbox) {
m3_ipc 201 drivers/soc/ti/wkup_m3_ipc.c ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
m3_ipc 208 drivers/soc/ti/wkup_m3_ipc.c mbox_client_txdone(m3_ipc->mbox, 0);
m3_ipc 212 drivers/soc/ti/wkup_m3_ipc.c static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
m3_ipc 214 drivers/soc/ti/wkup_m3_ipc.c return ((m3_ipc->state != M3_STATE_RESET) &&
m3_ipc 215 drivers/soc/ti/wkup_m3_ipc.c (m3_ipc->state != M3_STATE_UNKNOWN));
m3_ipc 226 drivers/soc/ti/wkup_m3_ipc.c static void wkup_m3_set_mem_type(struct wkup_m3_ipc *m3_ipc, int mem_type)
m3_ipc 228 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->mem_type = mem_type;
m3_ipc 235 drivers/soc/ti/wkup_m3_ipc.c static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
m3_ipc 237 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->resume_addr = (unsigned long)addr;
m3_ipc 247 drivers/soc/ti/wkup_m3_ipc.c static int wkup_m3_request_pm_status(struct wkup_m3_ipc *m3_ipc)
m3_ipc 252 drivers/soc/ti/wkup_m3_ipc.c val = wkup_m3_ctrl_ipc_read(m3_ipc, 1);
m3_ipc 267 drivers/soc/ti/wkup_m3_ipc.c static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
m3_ipc 269 drivers/soc/ti/wkup_m3_ipc.c struct device *dev = m3_ipc->dev;
m3_ipc 273 drivers/soc/ti/wkup_m3_ipc.c if (!wkup_m3_is_available(m3_ipc))
m3_ipc 291 drivers/soc/ti/wkup_m3_ipc.c wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->resume_addr, 0);
m3_ipc 292 drivers/soc/ti/wkup_m3_ipc.c wkup_m3_ctrl_ipc_write(m3_ipc, m3_power_state, 1);
m3_ipc 293 drivers/soc/ti/wkup_m3_ipc.c wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type, 4);
m3_ipc 295 drivers/soc/ti/wkup_m3_ipc.c wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
m3_ipc 296 drivers/soc/ti/wkup_m3_ipc.c wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 3);
m3_ipc 297 drivers/soc/ti/wkup_m3_ipc.c wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
m3_ipc 298 drivers/soc/ti/wkup_m3_ipc.c wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 6);
m3_ipc 299 drivers/soc/ti/wkup_m3_ipc.c wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 7);
m3_ipc 301 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->state = M3_STATE_MSG_FOR_LP;
m3_ipc 304 drivers/soc/ti/wkup_m3_ipc.c ret = wkup_m3_ping_noirq(m3_ipc);
m3_ipc 306 drivers/soc/ti/wkup_m3_ipc.c ret = wkup_m3_ping(m3_ipc);
m3_ipc 321 drivers/soc/ti/wkup_m3_ipc.c static int wkup_m3_finish_low_power(struct wkup_m3_ipc *m3_ipc)
m3_ipc 323 drivers/soc/ti/wkup_m3_ipc.c struct device *dev = m3_ipc->dev;
m3_ipc 326 drivers/soc/ti/wkup_m3_ipc.c if (!wkup_m3_is_available(m3_ipc))
m3_ipc 329 drivers/soc/ti/wkup_m3_ipc.c wkup_m3_ctrl_ipc_write(m3_ipc, IPC_CMD_RESET, 1);
m3_ipc 330 drivers/soc/ti/wkup_m3_ipc.c wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
m3_ipc 332 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->state = M3_STATE_MSG_FOR_RESET;
m3_ipc 334 drivers/soc/ti/wkup_m3_ipc.c ret = wkup_m3_ping(m3_ipc);
m3_ipc 347 drivers/soc/ti/wkup_m3_ipc.c static const char *wkup_m3_request_wake_src(struct wkup_m3_ipc *m3_ipc)
m3_ipc 352 drivers/soc/ti/wkup_m3_ipc.c val = wkup_m3_ctrl_ipc_read(m3_ipc, 6);
m3_ipc 368 drivers/soc/ti/wkup_m3_ipc.c static void wkup_m3_set_rtc_only(struct wkup_m3_ipc *m3_ipc)
m3_ipc 405 drivers/soc/ti/wkup_m3_ipc.c void wkup_m3_ipc_put(struct wkup_m3_ipc *m3_ipc)
m3_ipc 412 drivers/soc/ti/wkup_m3_ipc.c static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc)
m3_ipc 414 drivers/soc/ti/wkup_m3_ipc.c struct device *dev = m3_ipc->dev;
m3_ipc 417 drivers/soc/ti/wkup_m3_ipc.c init_completion(&m3_ipc->sync_complete);
m3_ipc 419 drivers/soc/ti/wkup_m3_ipc.c ret = rproc_boot(m3_ipc->rproc);
m3_ipc 423 drivers/soc/ti/wkup_m3_ipc.c m3_ipc_state = m3_ipc;
m3_ipc 436 drivers/soc/ti/wkup_m3_ipc.c struct wkup_m3_ipc *m3_ipc;
m3_ipc 438 drivers/soc/ti/wkup_m3_ipc.c m3_ipc = devm_kzalloc(dev, sizeof(*m3_ipc), GFP_KERNEL);
m3_ipc 439 drivers/soc/ti/wkup_m3_ipc.c if (!m3_ipc)
m3_ipc 443 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->ipc_mem_base = devm_ioremap_resource(dev, res);
m3_ipc 444 drivers/soc/ti/wkup_m3_ipc.c if (IS_ERR(m3_ipc->ipc_mem_base)) {
m3_ipc 446 drivers/soc/ti/wkup_m3_ipc.c return PTR_ERR(m3_ipc->ipc_mem_base);
m3_ipc 456 drivers/soc/ti/wkup_m3_ipc.c 0, "wkup_m3_txev", m3_ipc);
m3_ipc 462 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->mbox_client.dev = dev;
m3_ipc 463 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->mbox_client.tx_done = NULL;
m3_ipc 464 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->mbox_client.tx_prepare = NULL;
m3_ipc 465 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->mbox_client.rx_callback = NULL;
m3_ipc 466 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->mbox_client.tx_block = false;
m3_ipc 467 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->mbox_client.knows_txdone = false;
m3_ipc 469 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->mbox = mbox_request_channel(&m3_ipc->mbox_client, 0);
m3_ipc 471 drivers/soc/ti/wkup_m3_ipc.c if (IS_ERR(m3_ipc->mbox)) {
m3_ipc 473 drivers/soc/ti/wkup_m3_ipc.c PTR_ERR(m3_ipc->mbox));
m3_ipc 474 drivers/soc/ti/wkup_m3_ipc.c return PTR_ERR(m3_ipc->mbox);
m3_ipc 490 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->rproc = m3_rproc;
m3_ipc 491 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->dev = dev;
m3_ipc 492 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->state = M3_STATE_RESET;
m3_ipc 494 drivers/soc/ti/wkup_m3_ipc.c m3_ipc->ops = &ipc_ops;
m3_ipc 501 drivers/soc/ti/wkup_m3_ipc.c task = kthread_run((void *)wkup_m3_rproc_boot_thread, m3_ipc,
m3_ipc 515 drivers/soc/ti/wkup_m3_ipc.c mbox_free_channel(m3_ipc->mbox);
m3_ipc 52 include/linux/wkup_m3_ipc.h void (*set_mem_type)(struct wkup_m3_ipc *m3_ipc, int mem_type);
m3_ipc 53 include/linux/wkup_m3_ipc.h void (*set_resume_address)(struct wkup_m3_ipc *m3_ipc, void *addr);
m3_ipc 54 include/linux/wkup_m3_ipc.h int (*prepare_low_power)(struct wkup_m3_ipc *m3_ipc, int state);
m3_ipc 55 include/linux/wkup_m3_ipc.h int (*finish_low_power)(struct wkup_m3_ipc *m3_ipc);
m3_ipc 56 include/linux/wkup_m3_ipc.h int (*request_pm_status)(struct wkup_m3_ipc *m3_ipc);
m3_ipc 57 include/linux/wkup_m3_ipc.h const char *(*request_wake_src)(struct wkup_m3_ipc *m3_ipc);
m3_ipc 58 include/linux/wkup_m3_ipc.h void (*set_rtc_only)(struct wkup_m3_ipc *m3_ipc);
m3_ipc 62 include/linux/wkup_m3_ipc.h void wkup_m3_ipc_put(struct wkup_m3_ipc *m3_ipc);
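Taken together, the pm33xx.c call sites above show the client-side usage pattern for this identifier: obtain the shared handle with wkup_m3_ipc_get(), drive the Cortex-M3 firmware through the wkup_m3_ipc_ops callbacks, and release the handle with wkup_m3_ipc_put(). Below is a minimal sketch of that pattern, assuming only the prototypes visible in the listing; the function name, the get/put placement around a single suspend cycle, and the mem_type/resume_address values are illustrative, not taken from the real driver.

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/wkup_m3_ipc.h>

/* Hypothetical client, modelled on the pm33xx.c occurrences listed above. */
static struct wkup_m3_ipc *m3_ipc;

static int example_enter_deepsleep(void *resume_address, int mem_type)
{
	int ret;

	m3_ipc = wkup_m3_ipc_get();
	if (!m3_ipc)
		return -EPROBE_DEFER;	/* wkup_m3_ipc driver not ready yet */

	/* Describe the memory type and the ARM resume vector to the M3 firmware */
	m3_ipc->ops->set_mem_type(m3_ipc, mem_type);
	m3_ipc->ops->set_resume_address(m3_ipc, resume_address);

	/* Hand the low-power transition over to the wkup_m3 */
	ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
	if (ret)
		goto out_put;

	/* ... the platform enters the low-power state here; on wakeup: ... */

	m3_ipc->ops->finish_low_power(m3_ipc);
	pr_info("PM: wakeup source: %s\n",
		m3_ipc->ops->request_wake_src(m3_ipc));

out_put:
	wkup_m3_ipc_put(m3_ipc);
	return ret;
}

The provider side is visible in the wkup_m3_ipc.c occurrences: probe wires m3_ipc->ops to its static ipc_ops table (line 494), whose members are the wkup_m3_* functions listed, so clients only go through the ops callbacks and never touch wkup_m3_ctrl_ipc_read()/wkup_m3_ctrl_ipc_write() directly.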