/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smd.h>
#include <linux/soc/qcom/smem.h>
#include <linux/wait.h>

/*
 * The Qualcomm Shared Memory communication solution provides point-to-point
 * channels for clients to send and receive streaming or packet based data.
 *
 * Each channel consists of a control item (channel info) and a ring buffer
 * pair. The channel info carries information related to channel state, flow
 * control and the offsets within the ring buffer.
 *
 * All allocated channels are listed in an allocation table, identifying the
 * pair of items by name, type and remote processor.
 *
 * Upon creating a new channel the remote processor allocates channel info and
 * ring buffer items from the smem heap and populates the allocation table. An
 * interrupt is sent to the other end of the channel, which should then scan
 * for new channels. A channel never goes away; it only changes state.
 *
 * The remote processor signals its intent to bring up the communication
 * channel by setting the state of its end of the channel to "opening" and
 * sending out an interrupt. We detect this change and register a smd device
 * to consume the channel. Upon finding a consumer we finish the handshake and
 * the channel is up.
 *
 * Upon closing a channel, the remote processor will update the state of its
 * end of the channel and signal us; we will then unregister any attached
 * device and close our end of the channel.
 *
 * Devices attached to a channel can use the qcom_smd_send function to push
 * data to the channel. This is done by copying the data into the tx ring
 * buffer, updating the pointers in the channel info and signaling the remote
 * processor.
 *
 * The remote processor does the equivalent when it transfers data; upon
 * receiving the interrupt we check the channel info for new data and deliver
 * it to the attached device. If the device is not ready to receive the data
 * we leave it in the ring buffer for now.
 */
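
/*
 * For orientation, a minimal client driver sketch (illustrative only; it uses
 * the struct qcom_smd_driver/qcom_smd_device/qcom_smd_id members referenced
 * further down in this file, and the "hello" channel name is hypothetical):
 *
 *	static int hello_callback(struct qcom_smd_device *qsdev,
 *				  const void *data, size_t count)
 *	{
 *		dev_info(&qsdev->dev, "received %zu bytes\n", count);
 *		return 0;
 *	}
 *
 *	static int hello_probe(struct qcom_smd_device *qsdev)
 *	{
 *		return qcom_smd_send(qsdev->channel, "ping", 4);
 *	}
 *
 *	static const struct qcom_smd_id hello_smd_match[] = {
 *		{ .name = "hello" },
 *		{}
 *	};
 *
 *	static struct qcom_smd_driver hello_driver = {
 *		.probe = hello_probe,
 *		.callback = hello_callback,
 *		.smd_match_table = hello_smd_match,
 *		.driver = { .name = "hello" },
 *	};
 *
 * The driver is then registered with qcom_smd_driver_register(&hello_driver).
 */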

struct smd_channel_info;
struct smd_channel_info_pair;
struct smd_channel_info_word;
struct smd_channel_info_word_pair;

#define SMD_ALLOC_TBL_COUNT	2
#define SMD_ALLOC_TBL_SIZE	64

/*
 * This lists the various smem heap items relevant for the allocation table and
 * smd channel entries.
 */
static const struct {
	unsigned alloc_tbl_id;
	unsigned info_base_id;
	unsigned fifo_base_id;
} smem_items[SMD_ALLOC_TBL_COUNT] = {
	{
		.alloc_tbl_id = 13,
		.info_base_id = 14,
		.fifo_base_id = 338
	},
	{
		.alloc_tbl_id = 266,
		.info_base_id = 138,
		.fifo_base_id = 202,
	},
};
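
/*
 * The smem items for an individual channel are found relative to these base
 * ids, offset by the channel id from its allocation entry (see
 * qcom_discover_channels() below). As a worked example, a hypothetical cid of
 * 3 in the first table would resolve to:
 *
 *	info_id = smem_items[0].info_base_id + 3;	i.e. smem item 17
 *	fifo_id = smem_items[0].fifo_base_id + 3;	i.e. smem item 341
 */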

/**
 * struct qcom_smd_edge - representing a remote processor
 * @smd:		handle to qcom_smd
 * @of_node:		of_node handle for information related to this edge
 * @edge_id:		identifier of this edge
 * @remote_pid:		identifier of remote processor
 * @irq:		interrupt for signals on this edge
 * @ipc_regmap:		regmap handle holding the outgoing ipc register
 * @ipc_offset:		offset within @ipc_regmap of the register for ipc
 * @ipc_bit:		bit in the register at @ipc_offset of @ipc_regmap
 * @channels:		list of all channels detected on this edge
 * @channels_lock:	guard for modifications of @channels
 * @allocated:		array of bitmaps representing already allocated channels
 * @need_rescan:	flag that the @work needs to scan smem for new channels
 * @smem_available:	last available amount of smem triggering a channel scan
 * @work:		work item for edge housekeeping
 */
struct qcom_smd_edge {
	struct qcom_smd *smd;
	struct device_node *of_node;
	unsigned edge_id;
	unsigned remote_pid;

	int irq;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct list_head channels;
	spinlock_t channels_lock;

	DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);

	bool need_rescan;
	unsigned smem_available;

	struct work_struct work;
};

/*
 * SMD channel states.
 */
enum smd_channel_state {
	SMD_CHANNEL_CLOSED,
	SMD_CHANNEL_OPENING,
	SMD_CHANNEL_OPENED,
	SMD_CHANNEL_FLUSHING,
	SMD_CHANNEL_CLOSING,
	SMD_CHANNEL_RESET,
	SMD_CHANNEL_RESET_OPENING
};

/**
 * struct qcom_smd_channel - smd channel struct
 * @edge:		qcom_smd_edge this channel is living on
 * @qsdev:		reference to an associated smd client device
 * @name:		name of the channel
 * @state:		local state of the channel
 * @remote_state:	remote state of the channel
 * @info:		byte aligned outgoing/incoming channel info
 * @info_word:		word aligned outgoing/incoming channel info
 * @tx_lock:		lock to make writes to the channel mutually exclusive
 * @fblockread_event:	wakeup event tied to tx fBLOCKREADINTR
 * @tx_fifo:		pointer to the outgoing ring buffer
 * @rx_fifo:		pointer to the incoming ring buffer
 * @fifo_size:		size of each ring buffer
 * @bounce_buffer:	bounce buffer for reading wrapped packets
 * @cb:			callback function registered for this channel
 * @recv_lock:		guard for rx info modifications and cb pointer
 * @pkt_size:		size of the currently handled packet
 * @list:		list entry for @channels in qcom_smd_edge
 */
struct qcom_smd_channel {
	struct qcom_smd_edge *edge;

	struct qcom_smd_device *qsdev;

	char *name;
	enum smd_channel_state state;
	enum smd_channel_state remote_state;

	struct smd_channel_info_pair *info;
	struct smd_channel_info_word_pair *info_word;

	struct mutex tx_lock;
	wait_queue_head_t fblockread_event;

	void *tx_fifo;
	void *rx_fifo;
	int fifo_size;

	void *bounce_buffer;
	int (*cb)(struct qcom_smd_device *, const void *, size_t);

	spinlock_t recv_lock;

	int pkt_size;

	struct list_head list;
};

/**
 * struct qcom_smd - smd struct
 * @dev:	device struct
 * @num_edges:	number of entries in @edges
 * @edges:	array of edges to be handled
 */
struct qcom_smd {
	struct device *dev;

	unsigned num_edges;
	struct qcom_smd_edge edges[0];
};

/*
 * Format of the smd_info smem items, for byte aligned channels.
 */
struct smd_channel_info {
	__le32 state;
	u8  fDSR;
	u8  fCTS;
	u8  fCD;
	u8  fRI;
	u8  fHEAD;
	u8  fTAIL;
	u8  fSTATE;
	u8  fBLOCKREADINTR;
	__le32 tail;
	__le32 head;
};

struct smd_channel_info_pair {
	struct smd_channel_info tx;
	struct smd_channel_info rx;
};

/*
 * Format of the smd_info smem items, for word aligned channels.
 */
struct smd_channel_info_word {
	__le32 state;
	__le32 fDSR;
	__le32 fCTS;
	__le32 fCD;
	__le32 fRI;
	__le32 fHEAD;
	__le32 fTAIL;
	__le32 fSTATE;
	__le32 fBLOCKREADINTR;
	__le32 tail;
	__le32 head;
};

struct smd_channel_info_word_pair {
	struct smd_channel_info_word tx;
	struct smd_channel_info_word rx;
};

#define GET_RX_CHANNEL_FLAG(channel, param)				     \
	({								     \
		BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
		channel->info_word ?					     \
			le32_to_cpu(channel->info_word->rx.param) :	     \
			channel->info->rx.param;			     \
	})

#define GET_RX_CHANNEL_INFO(channel, param)				      \
	({								      \
		BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
		le32_to_cpu(channel->info_word ?			      \
			channel->info_word->rx.param :			      \
			channel->info->rx.param);			      \
	})

#define SET_RX_CHANNEL_FLAG(channel, param, value)			     \
	({								     \
		BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
		if (channel->info_word)					     \
			channel->info_word->rx.param = cpu_to_le32(value);   \
		else							     \
			channel->info->rx.param = value;		     \
	})

#define SET_RX_CHANNEL_INFO(channel, param, value)			      \
	({								      \
		BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
		if (channel->info_word)					      \
			channel->info_word->rx.param = cpu_to_le32(value);    \
		else							      \
			channel->info->rx.param = cpu_to_le32(value);	      \
	})

#define GET_TX_CHANNEL_FLAG(channel, param)				     \
	({								     \
		BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
		channel->info_word ?					     \
			le32_to_cpu(channel->info_word->tx.param) :          \
			channel->info->tx.param;			     \
	})

#define GET_TX_CHANNEL_INFO(channel, param)				      \
	({								      \
		BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
		le32_to_cpu(channel->info_word ?			      \
			channel->info_word->tx.param :			      \
			channel->info->tx.param);			      \
	})

#define SET_TX_CHANNEL_FLAG(channel, param, value)			     \
	({								     \
		BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
		if (channel->info_word)					     \
			channel->info_word->tx.param = cpu_to_le32(value);   \
		else							     \
			channel->info->tx.param = value;		     \
	})

#define SET_TX_CHANNEL_INFO(channel, param, value)			      \
	({								      \
		BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
		if (channel->info_word)					      \
			channel->info_word->tx.param = cpu_to_le32(value);   \
		else							      \
			channel->info->tx.param = cpu_to_le32(value);	      \
	})
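
/*
 * These accessors are used throughout the rest of this file, e.g.:
 *
 *	head = GET_TX_CHANNEL_INFO(channel, head);
 *	SET_RX_CHANNEL_FLAG(channel, fSTATE, 0);
 *
 * The FLAG variants are for the u8 members of struct smd_channel_info and the
 * INFO variants for its __le32 members (enforced by the BUILD_BUG_ON()s);
 * both transparently use the __le32 layout when the channel is word aligned,
 * i.e. when info_word is set.
 */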

/**
 * struct qcom_smd_alloc_entry - channel allocation entry
 * @name:	channel name
 * @cid:	channel index
 * @flags:	channel flags and edge id
 * @ref_count:	reference count of the channel
 */
struct qcom_smd_alloc_entry {
	u8 name[20];
	__le32 cid;
	__le32 flags;
	__le32 ref_count;
} __packed;

#define SMD_CHANNEL_FLAGS_EDGE_MASK	0xff
#define SMD_CHANNEL_FLAGS_STREAM	BIT(8)
#define SMD_CHANNEL_FLAGS_PACKET	BIT(9)

/*
 * Each smd packet contains a 20 byte header, with the first 4 bytes being the
 * length of the packet.
 */
#define SMD_PACKET_HEADER_LEN	20
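
/*
 * In qcom_smd_send() below this header is built as five little-endian words,
 * with only the first word (the payload length) filled in and the remaining
 * four left as zero:
 *
 *	__le32 hdr[5] = { cpu_to_le32(len), };
 */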

/*
 * Signal the remote processor associated with 'channel'.
 */
static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
{
	struct qcom_smd_edge *edge = channel->edge;

	regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit));
}

/*
 * Initialize the tx channel info
 */
static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
{
	SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
	SET_TX_CHANNEL_FLAG(channel, fDSR, 0);
	SET_TX_CHANNEL_FLAG(channel, fCTS, 0);
	SET_TX_CHANNEL_FLAG(channel, fCD, 0);
	SET_TX_CHANNEL_FLAG(channel, fRI, 0);
	SET_TX_CHANNEL_FLAG(channel, fHEAD, 0);
	SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);
	SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
	SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
	SET_TX_CHANNEL_INFO(channel, head, 0);
	SET_TX_CHANNEL_INFO(channel, tail, 0);

	qcom_smd_signal_channel(channel);

	channel->state = SMD_CHANNEL_CLOSED;
	channel->pkt_size = 0;
}

/*
 * Calculate the amount of data available in the rx fifo
 */
static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
{
	unsigned head;
	unsigned tail;

	head = GET_RX_CHANNEL_INFO(channel, head);
	tail = GET_RX_CHANNEL_INFO(channel, tail);

	return (head - tail) & (channel->fifo_size - 1);
}
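
/*
 * The code relies on the fifo size being a power of two, so the head/tail
 * difference can be masked rather than taken modulo the size. As a worked
 * example with a 1024 byte fifo, head = 16 and tail = 1000 (i.e. the data has
 * wrapped around the end of the buffer):
 *
 *	(16 - 1000) & (1024 - 1) == 40 bytes available to read
 */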

/*
 * Set tx channel state and inform the remote processor
 */
static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
				       int state)
{
	struct qcom_smd_edge *edge = channel->edge;
	bool is_open = state == SMD_CHANNEL_OPENED;

	if (channel->state == state)
		return;

	dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state);

	SET_TX_CHANNEL_FLAG(channel, fDSR, is_open);
	SET_TX_CHANNEL_FLAG(channel, fCTS, is_open);
	SET_TX_CHANNEL_FLAG(channel, fCD, is_open);

	SET_TX_CHANNEL_INFO(channel, state, state);
	SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);

	channel->state = state;
	qcom_smd_signal_channel(channel);
}

/*
 * Copy count bytes of data using 32bit accesses, if that's required.
 */
static void smd_copy_to_fifo(void __iomem *dst,
			     const void *src,
			     size_t count,
			     bool word_aligned)
{
	if (word_aligned) {
		__iowrite32_copy(dst, src, count / sizeof(u32));
	} else {
		memcpy_toio(dst, src, count);
	}
}

/*
 * Copy count bytes of data using 32bit accesses, if that is required.
 */
static void smd_copy_from_fifo(void *_dst,
			       const void __iomem *_src,
			       size_t count,
			       bool word_aligned)
{
	u32 *dst = (u32 *)_dst;
	u32 *src = (u32 *)_src;

	if (word_aligned) {
		count /= sizeof(u32);
		while (count--)
			*dst++ = __raw_readl(src++);
	} else {
		memcpy_fromio(_dst, _src, count);
	}
}

/*
 * Read count bytes of data from the rx fifo into buf, but don't advance the
 * tail.
 */
static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
				    void *buf, size_t count)
{
	bool word_aligned;
	unsigned tail;
	size_t len;

	word_aligned = channel->info_word;
	tail = GET_RX_CHANNEL_INFO(channel, tail);

	len = min_t(size_t, count, channel->fifo_size - tail);
	if (len) {
		smd_copy_from_fifo(buf,
				   channel->rx_fifo + tail,
				   len,
				   word_aligned);
	}

	if (len != count) {
		smd_copy_from_fifo(buf + len,
				   channel->rx_fifo,
				   count - len,
				   word_aligned);
	}

	return count;
}

/*
 * Advance the rx tail by count bytes.
 */
static void qcom_smd_channel_advance(struct qcom_smd_channel *channel,
				     size_t count)
{
	unsigned tail;

	tail = GET_RX_CHANNEL_INFO(channel, tail);
	tail += count;
	tail &= (channel->fifo_size - 1);
	SET_RX_CHANNEL_INFO(channel, tail, tail);
}

/*
 * Read out a single packet from the rx fifo and deliver it to the device
 */
static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
{
	struct qcom_smd_device *qsdev = channel->qsdev;
	unsigned tail;
	size_t len;
	void *ptr;
	int ret;

	if (!channel->cb)
		return 0;

	tail = GET_RX_CHANNEL_INFO(channel, tail);

	/* Use bounce buffer if the data wraps */
	if (tail + channel->pkt_size >= channel->fifo_size) {
		ptr = channel->bounce_buffer;
		len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size);
	} else {
		ptr = channel->rx_fifo + tail;
		len = channel->pkt_size;
	}

	ret = channel->cb(qsdev, ptr, len);
	if (ret < 0)
		return ret;

	/* Only forward the tail if the client consumed the data */
	qcom_smd_channel_advance(channel, len);

	channel->pkt_size = 0;

	return 0;
}

/*
 * Per channel interrupt handling
 */
static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
{
	bool need_state_scan = false;
	int remote_state;
	__le32 pktlen;
	int avail;
	int ret;

	/* Handle state changes */
	remote_state = GET_RX_CHANNEL_INFO(channel, state);
	if (remote_state != channel->remote_state) {
		channel->remote_state = remote_state;
		need_state_scan = true;
	}
	/* Indicate that we have seen any state change */
	SET_RX_CHANNEL_FLAG(channel, fSTATE, 0);

	/* Signal waiting qcom_smd_send() about the interrupt */
	if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR))
		wake_up_interruptible(&channel->fblockread_event);

	/* Don't consume any data until we've opened the channel */
	if (channel->state != SMD_CHANNEL_OPENED)
		goto out;

	/* Indicate that we've seen the new data */
	SET_RX_CHANNEL_FLAG(channel, fHEAD, 0);

	/* Consume data */
	for (;;) {
		avail = qcom_smd_channel_get_rx_avail(channel);

		if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) {
			qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen));
			qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN);
			channel->pkt_size = le32_to_cpu(pktlen);
		} else if (channel->pkt_size && avail >= channel->pkt_size) {
			ret = qcom_smd_channel_recv_single(channel);
			if (ret)
				break;
		} else {
			break;
		}
	}

	/* Indicate that we have seen and updated tail */
	SET_RX_CHANNEL_FLAG(channel, fTAIL, 1);

	/* Signal the remote that we've consumed the data (if requested) */
	if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) {
		/* Ensure ordering of channel info updates */
		wmb();

		qcom_smd_signal_channel(channel);
	}

out:
	return need_state_scan;
}

/*
 * The edge interrupts are triggered by the remote processor on state changes,
 * channel info updates or when new channels are created.
 */
static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
{
	struct qcom_smd_edge *edge = data;
	struct qcom_smd_channel *channel;
	unsigned available;
	bool kick_worker = false;

	/*
	 * Handle state changes or data on each of the channels on this edge
	 */
	spin_lock(&edge->channels_lock);
	list_for_each_entry(channel, &edge->channels, list) {
		spin_lock(&channel->recv_lock);
		kick_worker |= qcom_smd_channel_intr(channel);
		spin_unlock(&channel->recv_lock);
	}
	spin_unlock(&edge->channels_lock);

	/*
	 * Creating a new channel requires allocating an smem entry, so we only
	 * have to scan if the amount of available space in smem has changed
	 * since the last scan.
	 */
	available = qcom_smem_get_free_space(edge->remote_pid);
	if (available != edge->smem_available) {
		edge->smem_available = available;
		edge->need_rescan = true;
		kick_worker = true;
	}

	if (kick_worker)
		schedule_work(&edge->work);

	return IRQ_HANDLED;
}

/*
 * Deliver any outstanding packets in the rx fifo; can be used after probe of
 * the clients to deliver any packets that weren't delivered before the client
 * was set up.
 */
static void qcom_smd_channel_resume(struct qcom_smd_channel *channel)
{
	unsigned long flags;

	spin_lock_irqsave(&channel->recv_lock, flags);
	qcom_smd_channel_intr(channel);
	spin_unlock_irqrestore(&channel->recv_lock, flags);
}

/*
 * Calculate how much space is available in the tx fifo.
 */
static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel)
{
	unsigned head;
	unsigned tail;
	unsigned mask = channel->fifo_size - 1;

	head = GET_TX_CHANNEL_INFO(channel, head);
	tail = GET_TX_CHANNEL_INFO(channel, tail);

	return mask - ((head - tail) & mask);
}

/*
 * Write count bytes of data into channel, possibly wrapping in the ring buffer
 */
static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
			       const void *data,
			       size_t count)
{
	bool word_aligned;
	unsigned head;
	size_t len;

	word_aligned = channel->info_word;
	head = GET_TX_CHANNEL_INFO(channel, head);

	len = min_t(size_t, count, channel->fifo_size - head);
	if (len) {
		smd_copy_to_fifo(channel->tx_fifo + head,
				 data,
				 len,
				 word_aligned);
	}

	if (len != count) {
		smd_copy_to_fifo(channel->tx_fifo,
				 data + len,
				 count - len,
				 word_aligned);
	}

	head += count;
	head &= (channel->fifo_size - 1);
	SET_TX_CHANNEL_INFO(channel, head, head);

	return count;
}

/**
 * qcom_smd_send - write data to smd channel
 * @channel:	channel handle
 * @data:	buffer of data to write
 * @len:	number of bytes to write
 *
 * This is a blocking write of len bytes into the channel's tx ring buffer,
 * followed by a signal to the remote end. It will sleep until there is enough
 * space available in the tx buffer, utilizing the fBLOCKREADINTR signaling
 * mechanism to avoid polling.
 */
int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
{
	__le32 hdr[5] = { cpu_to_le32(len), };
	int tlen = sizeof(hdr) + len;
	int ret;

	/* Word aligned channels only accept word size aligned data */
	if (channel->info_word && len % 4)
		return -EINVAL;

	/* Reject packets that are too big */
	if (tlen >= channel->fifo_size)
		return -EINVAL;

	ret = mutex_lock_interruptible(&channel->tx_lock);
	if (ret)
		return ret;

	while (qcom_smd_get_tx_avail(channel) < tlen) {
		if (channel->state != SMD_CHANNEL_OPENED) {
			ret = -EPIPE;
			goto out;
		}

		SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0);

		ret = wait_event_interruptible(channel->fblockread_event,
				       qcom_smd_get_tx_avail(channel) >= tlen ||
				       channel->state != SMD_CHANNEL_OPENED);
		if (ret)
			goto out;

		SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
	}

	SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);

	qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
	qcom_smd_write_fifo(channel, data, len);

	SET_TX_CHANNEL_FLAG(channel, fHEAD, 1);

	/* Ensure ordering of channel info updates */
	wmb();

	qcom_smd_signal_channel(channel);

out:
	mutex_unlock(&channel->tx_lock);

	return ret;
}
EXPORT_SYMBOL(qcom_smd_send);
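
/*
 * As word aligned channels only accept multiples of four bytes, a caller with
 * an arbitrarily sized message could pad it before sending; a minimal sketch,
 * assuming a kmalloc'ed scratch buffer is acceptable:
 *
 *	size_t aligned = ALIGN(msg_len, 4);
 *	void *buf = kzalloc(aligned, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	memcpy(buf, msg, msg_len);
 *	ret = qcom_smd_send(channel, buf, aligned);
 *	kfree(buf);
 */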

static struct qcom_smd_device *to_smd_device(struct device *dev)
{
	return container_of(dev, struct qcom_smd_device, dev);
}

static struct qcom_smd_driver *to_smd_driver(struct device *dev)
{
	struct qcom_smd_device *qsdev = to_smd_device(dev);

	return container_of(qsdev->dev.driver, struct qcom_smd_driver, driver);
}

static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
{
	struct qcom_smd_device *qsdev = to_smd_device(dev);
	struct qcom_smd_driver *qsdrv = container_of(drv, struct qcom_smd_driver, driver);
	const struct qcom_smd_id *match = qsdrv->smd_match_table;
	const char *name = qsdev->channel->name;

	if (match) {
		while (match->name[0]) {
			if (!strcmp(match->name, name))
				return 1;
			match++;
		}
	}

	return of_driver_match_device(dev, drv);
}

/*
 * Probe the smd client.
 *
 * The remote side has indicated that it wants the channel to be opened, so
 * complete the state handshake and probe our client driver.
 */
static int qcom_smd_dev_probe(struct device *dev)
{
	struct qcom_smd_device *qsdev = to_smd_device(dev);
	struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
	struct qcom_smd_channel *channel = qsdev->channel;
	size_t bb_size;
	int ret;

	/*
	 * Packets are at most 4k, but use a smaller bounce buffer if the fifo
	 * is smaller
	 */
	bb_size = min(channel->fifo_size, SZ_4K);
	channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL);
	if (!channel->bounce_buffer)
		return -ENOMEM;

	channel->cb = qsdrv->callback;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);

	ret = qsdrv->probe(qsdev);
	if (ret)
		goto err;

	qcom_smd_channel_resume(channel);

	return 0;

err:
	dev_err(&qsdev->dev, "probe failed\n");

	channel->cb = NULL;
	kfree(channel->bounce_buffer);
	channel->bounce_buffer = NULL;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
	return ret;
}

/*
 * Remove the smd client.
 *
 * The channel is going away, for some reason, so remove the smd client and
 * reset the channel state.
 */
static int qcom_smd_dev_remove(struct device *dev)
{
	struct qcom_smd_device *qsdev = to_smd_device(dev);
	struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
	struct qcom_smd_channel *channel = qsdev->channel;
	unsigned long flags;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);

	/*
	 * Make sure we don't race with the code receiving data.
	 */
	spin_lock_irqsave(&channel->recv_lock, flags);
	channel->cb = NULL;
	spin_unlock_irqrestore(&channel->recv_lock, flags);

	/* Wake up any sleepers in qcom_smd_send() */
	wake_up_interruptible(&channel->fblockread_event);

	/*
	 * We expect that the client might block in remove() waiting for any
	 * outstanding calls to qcom_smd_send() to wake up and finish.
	 */
	if (qsdrv->remove)
		qsdrv->remove(qsdev);

	/*
	 * The client is now gone, cleanup and reset the channel state.
	 */
	channel->qsdev = NULL;
	kfree(channel->bounce_buffer);
	channel->bounce_buffer = NULL;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);

	qcom_smd_channel_reset(channel);

	return 0;
}

static struct bus_type qcom_smd_bus = {
	.name = "qcom_smd",
	.match = qcom_smd_dev_match,
	.probe = qcom_smd_dev_probe,
	.remove = qcom_smd_dev_remove,
};

/*
 * Release function for the qcom_smd_device object.
 */
static void qcom_smd_release_device(struct device *dev)
{
	struct qcom_smd_device *qsdev = to_smd_device(dev);

	kfree(qsdev);
}

/*
 * Finds the device_node for the smd child interested in this channel.
 */
static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
						  const char *channel)
{
	struct device_node *child;
	const char *name;
	const char *key;
	int ret;

	for_each_available_child_of_node(edge_node, child) {
		key = "qcom,smd-channels";
		ret = of_property_read_string(child, key, &name);
		if (ret)
			continue;

		if (strcmp(name, channel) == 0)
			return child;
	}

	return NULL;
}

/*
 * Create a smd client device for a channel that is being opened.
 */
static int qcom_smd_create_device(struct qcom_smd_channel *channel)
{
	struct qcom_smd_device *qsdev;
	struct qcom_smd_edge *edge = channel->edge;
	struct device_node *node;
	struct qcom_smd *smd = edge->smd;
	int ret;

	if (channel->qsdev)
		return -EEXIST;

	dev_dbg(smd->dev, "registering '%s'\n", channel->name);

	qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
	if (!qsdev)
		return -ENOMEM;

	node = qcom_smd_match_channel(edge->of_node, channel->name);
	dev_set_name(&qsdev->dev, "%s.%s",
		     edge->of_node->name,
		     node ? node->name : channel->name);

	qsdev->dev.parent = smd->dev;
	qsdev->dev.bus = &qcom_smd_bus;
	qsdev->dev.release = qcom_smd_release_device;
	qsdev->dev.of_node = node;

	qsdev->channel = channel;

	channel->qsdev = qsdev;

	ret = device_register(&qsdev->dev);
	if (ret) {
		dev_err(smd->dev, "device_register failed: %d\n", ret);
		put_device(&qsdev->dev);
	}

	return ret;
}

/*
 * Destroy a smd client device for a channel that's going away.
 */
static void qcom_smd_destroy_device(struct qcom_smd_channel *channel)
{
	struct device *dev;

	BUG_ON(!channel->qsdev);

	dev = &channel->qsdev->dev;

	device_unregister(dev);
	of_node_put(dev->of_node);
	put_device(dev);
}

/**
 * qcom_smd_driver_register - register a smd driver
 * @qsdrv:	qcom_smd_driver struct
 */
int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv)
{
	qsdrv->driver.bus = &qcom_smd_bus;
	return driver_register(&qsdrv->driver);
}
EXPORT_SYMBOL(qcom_smd_driver_register);

/**
 * qcom_smd_driver_unregister - unregister a smd driver
 * @qsdrv:	qcom_smd_driver struct
 */
void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv)
{
	driver_unregister(&qsdrv->driver);
}
EXPORT_SYMBOL(qcom_smd_driver_unregister);

/*
 * Allocate the qcom_smd_channel object for a newly found smd channel,
 * retrieving and validating the smem items involved.
 */
static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge,
							unsigned smem_info_item,
							unsigned smem_fifo_item,
							char *name)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd *smd = edge->smd;
	size_t fifo_size;
	size_t info_size;
	void *fifo_base;
	void *info;
	int ret;

	channel = devm_kzalloc(smd->dev, sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	channel->edge = edge;
	channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
	if (!channel->name)
		return ERR_PTR(-ENOMEM);

	mutex_init(&channel->tx_lock);
	spin_lock_init(&channel->recv_lock);
	init_waitqueue_head(&channel->fblockread_event);

	info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto free_name_and_channel;
	}

	/*
	 * Use the size of the item to figure out which channel info struct to
	 * use.
	 */
	if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
		channel->info_word = info;
	} else if (info_size == 2 * sizeof(struct smd_channel_info)) {
		channel->info = info;
	} else {
		dev_err(smd->dev,
			"channel info of size %zu not supported\n", info_size);
		ret = -EINVAL;
		goto free_name_and_channel;
	}

	fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size);
	if (IS_ERR(fifo_base)) {
		ret = PTR_ERR(fifo_base);
		goto free_name_and_channel;
	}

	/* The channel consists of a rx and tx fifo of equal size */
	fifo_size /= 2;

	dev_dbg(smd->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
			  name, info_size, fifo_size);

	channel->tx_fifo = fifo_base;
	channel->rx_fifo = fifo_base + fifo_size;
	channel->fifo_size = fifo_size;

	qcom_smd_channel_reset(channel);

	return channel;

free_name_and_channel:
	devm_kfree(smd->dev, channel->name);
	devm_kfree(smd->dev, channel);

	return ERR_PTR(ret);
}

/*
 * Scans the allocation table for any newly allocated channels, calls
 * qcom_smd_create_channel() to create representations of these and add
 * them to the edge's list of channels.
 */
static void qcom_discover_channels(struct qcom_smd_edge *edge)
{
	struct qcom_smd_alloc_entry *alloc_tbl;
	struct qcom_smd_alloc_entry *entry;
	struct qcom_smd_channel *channel;
	struct qcom_smd *smd = edge->smd;
	unsigned long flags;
	unsigned fifo_id;
	unsigned info_id;
	int tbl;
	int i;
	u32 eflags, cid;

	for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
		alloc_tbl = qcom_smem_get(edge->remote_pid,
				    smem_items[tbl].alloc_tbl_id, NULL);
		if (IS_ERR(alloc_tbl))
			continue;

		for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
			entry = &alloc_tbl[i];
			eflags = le32_to_cpu(entry->flags);
			if (test_bit(i, edge->allocated[tbl]))
				continue;

			if (entry->ref_count == 0)
				continue;

			if (!entry->name[0])
				continue;

			if (!(eflags & SMD_CHANNEL_FLAGS_PACKET))
				continue;

			if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
				continue;

			cid = le32_to_cpu(entry->cid);
			info_id = smem_items[tbl].info_base_id + cid;
			fifo_id = smem_items[tbl].fifo_base_id + cid;

			channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
			if (IS_ERR(channel))
				continue;

			spin_lock_irqsave(&edge->channels_lock, flags);
			list_add(&channel->list, &edge->channels);
			spin_unlock_irqrestore(&edge->channels_lock, flags);

			dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
			set_bit(i, edge->allocated[tbl]);
		}
	}

	schedule_work(&edge->work);
}

/*
 * This per edge worker scans smem for any new channels and registers these.
 * It then scans all registered channels for state changes that should be
 * handled by creating or destroying smd client devices for the registered
 * channels.
 *
 * LOCKING: edge->channels_lock does not need to be held during the traversal
 * of the channels list, as it's done synchronously with the only writer.
 */
static void qcom_channel_state_worker(struct work_struct *work)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd_edge *edge = container_of(work,
						  struct qcom_smd_edge,
						  work);
	unsigned remote_state;

	/*
	 * Rescan smem if we have reason to believe that there are new channels.
	 */
	if (edge->need_rescan) {
		edge->need_rescan = false;
		qcom_discover_channels(edge);
	}

	/*
	 * Register a device for any closed channel where the remote processor
	 * is showing interest in opening the channel.
	 */
	list_for_each_entry(channel, &edge->channels, list) {
		if (channel->state != SMD_CHANNEL_CLOSED)
			continue;

		remote_state = GET_RX_CHANNEL_INFO(channel, state);
		if (remote_state != SMD_CHANNEL_OPENING &&
		    remote_state != SMD_CHANNEL_OPENED)
			continue;

		qcom_smd_create_device(channel);
	}

	/*
	 * Unregister the device for any channel that is opened where the
	 * remote processor is closing the channel.
	 */
	list_for_each_entry(channel, &edge->channels, list) {
		if (channel->state != SMD_CHANNEL_OPENING &&
		    channel->state != SMD_CHANNEL_OPENED)
			continue;

		remote_state = GET_RX_CHANNEL_INFO(channel, state);
		if (remote_state == SMD_CHANNEL_OPENING ||
		    remote_state == SMD_CHANNEL_OPENED)
			continue;

		qcom_smd_destroy_device(channel);
	}
}

/*
 * Parses an of_node describing an edge.
 */
static int qcom_smd_parse_edge(struct device *dev,
			       struct device_node *node,
			       struct qcom_smd_edge *edge)
{
	struct device_node *syscon_np;
	const char *key;
	int irq;
	int ret;

	INIT_LIST_HEAD(&edge->channels);
	spin_lock_init(&edge->channels_lock);

	INIT_WORK(&edge->work, qcom_channel_state_worker);

	edge->of_node = of_node_get(node);

	irq = irq_of_parse_and_map(node, 0);
	if (irq < 0) {
		dev_err(dev, "required smd interrupt missing\n");
		return -EINVAL;
	}

	ret = devm_request_irq(dev, irq,
			       qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
			       node->name, edge);
	if (ret) {
		dev_err(dev, "failed to request smd irq\n");
		return ret;
	}

	edge->irq = irq;

	key = "qcom,smd-edge";
	ret = of_property_read_u32(node, key, &edge->edge_id);
	if (ret) {
		dev_err(dev, "edge missing %s property\n", key);
		return -EINVAL;
	}

	edge->remote_pid = QCOM_SMEM_HOST_ANY;
	key = "qcom,remote-pid";
	of_property_read_u32(node, key, &edge->remote_pid);

	syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
	if (!syscon_np) {
		dev_err(dev, "no qcom,ipc node\n");
		return -ENODEV;
	}

	edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
	if (IS_ERR(edge->ipc_regmap))
		return PTR_ERR(edge->ipc_regmap);

	key = "qcom,ipc";
	ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
	if (ret < 0) {
		dev_err(dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
	if (ret < 0) {
		dev_err(dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}
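
/*
 * For reference, an edge node carrying the properties parsed above could look
 * roughly like this (the phandle, numbers and names are purely illustrative,
 * not taken from a real board):
 *
 *	smd {
 *		compatible = "qcom,smd";
 *
 *		modem-edge {
 *			interrupts = <0 25 IRQ_TYPE_EDGE_RISING>;
 *			qcom,ipc = <&apcs 8 12>;
 *			qcom,smd-edge = <0>;
 *			qcom,remote-pid = <1>;
 *
 *			hello-client {
 *				compatible = "vendor,hello-client";
 *				qcom,smd-channels = "hello";
 *			};
 *		};
 *	};
 */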

static int qcom_smd_probe(struct platform_device *pdev)
{
	struct qcom_smd_edge *edge;
	struct device_node *node;
	struct qcom_smd *smd;
	size_t array_size;
	int num_edges;
	int ret;
	int i = 0;
	void *p;

	/* Wait for smem */
	p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL);
	if (PTR_ERR(p) == -EPROBE_DEFER)
		return PTR_ERR(p);

	num_edges = of_get_available_child_count(pdev->dev.of_node);
	array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge);
	smd = devm_kzalloc(&pdev->dev, array_size, GFP_KERNEL);
	if (!smd)
		return -ENOMEM;
	smd->dev = &pdev->dev;

	smd->num_edges = num_edges;
	for_each_available_child_of_node(pdev->dev.of_node, node) {
		edge = &smd->edges[i++];
		edge->smd = smd;

		ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
		if (ret)
			continue;

		edge->need_rescan = true;
		schedule_work(&edge->work);
	}

	platform_set_drvdata(pdev, smd);

	return 0;
}

/*
 * Shut down all smd clients by making sure that each edge stops processing
 * events and scanning for new channels, then call destroy on the devices.
 */
static int qcom_smd_remove(struct platform_device *pdev)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd_edge *edge;
	struct qcom_smd *smd = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < smd->num_edges; i++) {
		edge = &smd->edges[i];

		disable_irq(edge->irq);
		cancel_work_sync(&edge->work);

		list_for_each_entry(channel, &edge->channels, list) {
			if (!channel->qsdev)
				continue;

			qcom_smd_destroy_device(channel);
		}
	}

	return 0;
}

static const struct of_device_id qcom_smd_of_match[] = {
	{ .compatible = "qcom,smd" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smd_of_match);

static struct platform_driver qcom_smd_driver = {
	.probe = qcom_smd_probe,
	.remove = qcom_smd_remove,
	.driver = {
		.name = "qcom-smd",
		.of_match_table = qcom_smd_of_match,
	},
};

static int __init qcom_smd_init(void)
{
	int ret;

	ret = bus_register(&qcom_smd_bus);
	if (ret) {
		pr_err("failed to register smd bus: %d\n", ret);
		return ret;
	}

	return platform_driver_register(&qcom_smd_driver);
}
postcore_initcall(qcom_smd_init);

static void __exit qcom_smd_exit(void)
{
	platform_driver_unregister(&qcom_smd_driver);
	bus_unregister(&qcom_smd_bus);
}
module_exit(qcom_smd_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Driver");
MODULE_LICENSE("GPL v2");