/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/usb/cdc.h>

#include "gdm_mux.h"

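/* Workqueue on which received mux data is demultiplexed and pushed to the TTY layer. */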
static struct workqueue_struct *mux_rx_wq;

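/* Mux packet type carried in the frame header for each TTY index. */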
static u16 packet_type[TTY_MAX_COUNT] = {0xF011, 0xF010};

#define USB_DEVICE_CDC_DATA(vid, pid) \
	.match_flags = \
		USB_DEVICE_ID_MATCH_DEVICE |\
		USB_DEVICE_ID_MATCH_INT_CLASS |\
		USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceClass = USB_CLASS_COMM,\
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ACM

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(0x1076, 0x8000) }, /* GCT GDM7240 */
	{ USB_DEVICE_CDC_DATA(0x1076, 0x8f00) }, /* GCT GDM7243 */
	{ USB_DEVICE_CDC_DATA(0x1076, 0x9000) }, /* GCT GDM7243 */
	{ USB_DEVICE_CDC_DATA(0x1d74, 0x2300) }, /* LGIT Phoenix */
	{}
};

MODULE_DEVICE_TABLE(usb, id_table);

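/* Map a header packet type back to its TTY index, or -1 if unknown. */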
static int packet_type_to_index(u16 packetType)
{
	int i;

	for (i = 0; i < TTY_MAX_COUNT; i++) {
		if (packet_type[i] == packetType)
			return i;
	}

	return -1;
}

static struct mux_tx *alloc_mux_tx(int len)
{
	struct mux_tx *t;

	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
	t->buf = kmalloc(MUX_TX_MAX_SIZE, GFP_ATOMIC);
	if (!t->urb || !t->buf) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
		return NULL;
	}

	return t;
}

static void free_mux_tx(struct mux_tx *t)
{
	if (t) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
	}
}

static struct mux_rx *alloc_mux_rx(void)
{
	struct mux_rx *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->urb = usb_alloc_urb(0, GFP_KERNEL);
	r->buf = kmalloc(MUX_RX_MAX_SIZE, GFP_KERNEL);
	if (!r->urb || !r->buf) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
		return NULL;
	}

	return r;
}

static void free_mux_rx(struct mux_rx *r)
{
	if (r) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
	}
}

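/* Take a preallocated rx descriptor off the free list, or return NULL. */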
static struct mux_rx *get_rx_struct(struct rx_cxt *rx)
{
	struct mux_rx *r;
	unsigned long flags;

	spin_lock_irqsave(&rx->free_list_lock, flags);

	if (list_empty(&rx->rx_free_list)) {
		spin_unlock_irqrestore(&rx->free_list_lock, flags);
		return NULL;
	}

	r = list_entry(rx->rx_free_list.prev, struct mux_rx, free_list);
	list_del(&r->free_list);

	spin_unlock_irqrestore(&rx->free_list_lock, flags);

	return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct mux_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->free_list_lock, flags);
	list_add_tail(&r->free_list, &rx->rx_free_list);
	spin_unlock_irqrestore(&rx->free_list_lock, flags);
}

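/*
 * Walk one received buffer and hand each mux frame to the TTY layer.
 * Frames are packed back to back; each starts with a mux_pkt_header
 * (start_flag, seq_num, payload_size, packet_type) and is padded to a
 * 4-byte boundary.  Parsing stops on a malformed frame, or when the
 * callback cannot accept more data, in which case the current position
 * is saved in r->offset.
 */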
static int up_to_host(struct mux_rx *r)
{
	struct mux_dev *mux_dev = r->mux_dev;
	struct mux_pkt_header *mux_header;
	unsigned int start_flag;
	unsigned int payload_size;
	unsigned short packet_type;
	int total_len;
	u32 packet_size_sum = r->offset;
	int index;
	int ret = TO_HOST_INVALID_PACKET;
	int len = r->len;

	while (1) {
		mux_header = (struct mux_pkt_header *)(r->buf +
						       packet_size_sum);
		start_flag = __le32_to_cpu(mux_header->start_flag);
		payload_size = __le32_to_cpu(mux_header->payload_size);
		packet_type = __le16_to_cpu(mux_header->packet_type);

		if (start_flag != START_FLAG) {
			pr_err("invalid START_FLAG %x\n", start_flag);
			break;
		}

		total_len = ALIGN(MUX_HEADER_SIZE + payload_size, 4);

		if (len - packet_size_sum < total_len) {
			pr_err("invalid payload : %d %d %04x\n",
			       payload_size, len, packet_type);
			break;
		}

		index = packet_type_to_index(packet_type);
		if (index < 0) {
			pr_err("invalid index %d\n", index);
			break;
		}

		ret = r->callback(mux_header->data,
				  payload_size,
				  index,
				  mux_dev->tty_dev,
				  RECV_PACKET_PROCESS_CONTINUE);
		if (ret == TO_HOST_BUFFER_REQUEST_FAIL) {
			r->offset += packet_size_sum;
			break;
		}

		packet_size_sum += total_len;
		if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
			ret = r->callback(NULL,
					  0,
					  index,
					  mux_dev->tty_dev,
					  RECV_PACKET_PROCESS_COMPLETE);
			break;
		}
	}

	return ret;
}

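/* Work handler: drain the to_host list and push each buffer upstream. */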
static void do_rx(struct work_struct *work)
{
	struct mux_dev *mux_dev =
		container_of(work, struct mux_dev, work_rx.work);
	struct mux_rx *r;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;
	int ret = 0;

	while (1) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		if (list_empty(&rx->to_host_list)) {
			spin_unlock_irqrestore(&rx->to_host_lock, flags);
			break;
		}
		r = list_entry(rx->to_host_list.next, struct mux_rx,
			       to_host_list);
		list_del(&r->to_host_list);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

		ret = up_to_host(r);
		if (ret == TO_HOST_BUFFER_REQUEST_FAIL)
			pr_err("failed to send mux data to host\n");
		else
			put_rx_struct(rx, r);
	}
}

static void remove_rx_submit_list(struct mux_rx *r, struct rx_cxt *rx)
{
	unsigned long flags;
	struct mux_rx *r_remove, *r_remove_next;

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list,
				 rx_submit_list) {
		if (r == r_remove)
			list_del(&r->rx_submit_list);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);
}

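/*
 * Bulk-in completion handler.  On success the buffer is queued on the
 * to_host list and the rx work is scheduled; on error the descriptor is
 * returned to the free list.
 */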
static void gdm_mux_rcv_complete(struct urb *urb)
{
	struct mux_rx *r = urb->context;
	struct mux_dev *mux_dev = r->mux_dev;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;

	remove_rx_submit_list(r, rx);

	if (urb->status) {
		if (mux_dev->usb_state == PM_NORMAL)
			dev_err(&urb->dev->dev, "%s: urb status error %d\n",
				__func__, urb->status);
		put_rx_struct(rx, r);
	} else {
		r->len = r->urb->actual_length;
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		queue_work(mux_rx_wq, &mux_dev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	}
}

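/*
 * Submit one bulk-in URB (endpoint 0x86) for the next chunk of mux data.
 * cb is invoked from the rx work for every demultiplexed payload.
 */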
static int gdm_mux_recv(void *priv_dev, int (*cb)(void *data, int len,
			int tty_index, struct tty_dev *tty_dev, int complete))
{
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	struct mux_rx *r;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;
	int ret;

	if (!usbdev) {
		pr_err("device is disconnected\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx);
	if (!r) {
		pr_err("get_rx_struct fail\n");
		return -ENOMEM;
	}

	r->offset = 0;
	r->mux_dev = (void *)mux_dev;
	r->callback = cb;
	mux_dev->rx_cb = cb;

	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x86),
			  r->buf,
			  MUX_RX_MAX_SIZE,
			  gdm_mux_rcv_complete,
			  r);

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	ret = usb_submit_urb(r->urb, GFP_KERNEL);

	if (ret) {
		spin_lock_irqsave(&rx->submit_list_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);

		put_rx_struct(rx, r);

		pr_err("usb_submit_urb ret=%d\n", ret);
	}

	usb_mark_last_busy(usbdev);

	return ret;
}

static void gdm_mux_send_complete(struct urb *urb)
{
	struct mux_tx *t = urb->context;

	if (urb->status == -ECONNRESET) {
		dev_info(&urb->dev->dev, "CONNRESET\n");
		free_mux_tx(t);
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_mux_tx(t);
}

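/*
 * Prepend a mux_pkt_header to the payload, pad the frame to a 4-byte
 * boundary and submit it on the bulk-out endpoint (5).  cb(cb_data) is
 * called from the completion handler once the URB has finished.
 */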
static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
			void (*cb)(void *data), void *cb_data)
{
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	struct mux_pkt_header *mux_header;
	struct mux_tx *t;
	static u32 seq_num = 1;
	int total_len;
	int ret;
	unsigned long flags;

	if (mux_dev->usb_state == PM_SUSPEND) {
		ret = usb_autopm_get_interface(mux_dev->intf);
		if (!ret)
			usb_autopm_put_interface(mux_dev->intf);
	}

	spin_lock_irqsave(&mux_dev->write_lock, flags);

	total_len = ALIGN(MUX_HEADER_SIZE + len, 4);

	t = alloc_mux_tx(total_len);
	if (!t) {
		pr_err("alloc_mux_tx fail\n");
		spin_unlock_irqrestore(&mux_dev->write_lock, flags);
		return -ENOMEM;
	}

	mux_header = (struct mux_pkt_header *)t->buf;
	mux_header->start_flag = __cpu_to_le32(START_FLAG);
	mux_header->seq_num = __cpu_to_le32(seq_num++);
	mux_header->payload_size = __cpu_to_le32((u32)len);
	mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);

	memcpy(t->buf + MUX_HEADER_SIZE, data, len);
	memset(t->buf + MUX_HEADER_SIZE + len, 0,
	       total_len - MUX_HEADER_SIZE - len);

	t->len = total_len;
	t->callback = cb;
	t->cb_data = cb_data;

	usb_fill_bulk_urb(t->urb,
			  usbdev,
			  usb_sndbulkpipe(usbdev, 5),
			  t->buf,
			  total_len,
			  gdm_mux_send_complete,
			  t);

	ret = usb_submit_urb(t->urb, GFP_ATOMIC);

	spin_unlock_irqrestore(&mux_dev->write_lock, flags);

	if (ret)
		pr_err("usb_submit_urb Error: %d\n", ret);

	usb_mark_last_busy(usbdev);

	return ret;
}

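/* Issue an ACM class control request to the device (wIndex fixed to 2). */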
static int gdm_mux_send_control(void *priv_dev, int request, int value,
				void *buf, int len)
{
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	int ret;

	ret = usb_control_msg(usbdev,
			      usb_sndctrlpipe(usbdev, 0),
			      request,
			      USB_RT_ACM,
			      value,
			      2,
			      buf,
			      len,
			      5000);

	if (ret < 0)
		pr_err("usb_control_msg error: %d\n", ret);

	return ret < 0 ? ret : 0;
}

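/*
 * Cancel pending rx work, kill in-flight rx URBs and free every rx
 * descriptor still sitting on the free or to_host lists.
 */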
static void release_usb(struct mux_dev *mux_dev)
{
	struct rx_cxt *rx = &mux_dev->rx;
	struct mux_rx *r, *r_next;
	unsigned long flags;

	cancel_delayed_work(&mux_dev->work_rx);

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_list_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	spin_lock_irqsave(&rx->free_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_free_list, free_list) {
		list_del(&r->free_list);
		free_mux_rx(r);
	}
	spin_unlock_irqrestore(&rx->free_list_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		if (r->mux_dev == (void *)mux_dev) {
			list_del(&r->to_host_list);
			free_mux_rx(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}

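/* Initialize locks and lists and preallocate the pool of rx descriptors. */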
static int init_usb(struct mux_dev *mux_dev)
{
	struct mux_rx *r;
	struct rx_cxt *rx = &mux_dev->rx;
	int ret = 0;
	int i;

	spin_lock_init(&mux_dev->write_lock);
	INIT_LIST_HEAD(&rx->to_host_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->rx_free_list);
	spin_lock_init(&rx->to_host_lock);
	spin_lock_init(&rx->submit_list_lock);
	spin_lock_init(&rx->free_list_lock);

	for (i = 0; i < MAX_ISSUE_NUM * 2; i++) {
		r = alloc_mux_rx();
		if (!r) {
			ret = -ENOMEM;
			break;
		}

		list_add(&r->free_list, &rx->rx_free_list);
	}

	INIT_DELAYED_WORK(&mux_dev->work_rx, do_rx);

	return ret;
}

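/*
 * Bind only to interface 2 of a matching device (the mux data interface,
 * assuming the usual GDM724x interface layout); allocate the mux and tty
 * contexts, wire up the send/recv/control hooks and register the LTE TTY
 * device.
 */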
static int gdm_mux_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct mux_dev *mux_dev;
	struct tty_dev *tty_dev;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	int ret;
	int i;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;

	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("mux vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	if (bInterfaceNumber != 2)
		return -ENODEV;

	mux_dev = kzalloc(sizeof(*mux_dev), GFP_KERNEL);
	if (!mux_dev)
		return -ENOMEM;

	tty_dev = kzalloc(sizeof(*tty_dev), GFP_KERNEL);
	if (!tty_dev) {
		ret = -ENOMEM;
		goto err_free_mux;
	}

	mux_dev->usbdev = usbdev;
	mux_dev->control_intf = intf;

	ret = init_usb(mux_dev);
	if (ret)
		goto err_free_usb;

	tty_dev->priv_dev = (void *)mux_dev;
	tty_dev->send_func = gdm_mux_send;
	tty_dev->recv_func = gdm_mux_recv;
	tty_dev->send_control = gdm_mux_send_control;

	ret = register_lte_tty_device(tty_dev, &intf->dev);
	if (ret)
		goto err_unregister_tty;

	for (i = 0; i < TTY_MAX_COUNT; i++)
		mux_dev->tty_dev = tty_dev;

	mux_dev->intf = intf;
	mux_dev->usb_state = PM_NORMAL;

	usb_get_dev(usbdev);
	usb_set_intfdata(intf, tty_dev);

	return 0;

err_unregister_tty:
	unregister_lte_tty_device(tty_dev);
err_free_usb:
	release_usb(mux_dev);
	kfree(tty_dev);
err_free_mux:
	kfree(mux_dev);

	return ret;
}

static void gdm_mux_disconnect(struct usb_interface *intf)
{
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	tty_dev = usb_get_intfdata(intf);

	mux_dev = tty_dev->priv_dev;

	release_usb(mux_dev);
	unregister_lte_tty_device(tty_dev);

	kfree(mux_dev);
	kfree(tty_dev);

	usb_put_dev(usbdev);
}

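/*
 * Autosuspend support: suspend kills the outstanding rx URBs and resume
 * queues fresh ones via gdm_mux_recv().
 */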
static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	struct rx_cxt *rx;
	struct mux_rx *r, *r_next;
	unsigned long flags;

	tty_dev = usb_get_intfdata(intf);
	mux_dev = tty_dev->priv_dev;
	rx = &mux_dev->rx;

	if (mux_dev->usb_state != PM_NORMAL) {
		dev_err(intf->usb_dev, "usb suspend - invalid state\n");
		return -1;
	}

	mux_dev->usb_state = PM_SUSPEND;

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_list_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	return 0;
}

static int gdm_mux_resume(struct usb_interface *intf)
{
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	u8 i;

	tty_dev = usb_get_intfdata(intf);
	mux_dev = tty_dev->priv_dev;

	if (mux_dev->usb_state != PM_SUSPEND) {
		dev_err(intf->usb_dev, "usb resume - invalid state\n");
		return -1;
	}

	mux_dev->usb_state = PM_NORMAL;

	for (i = 0; i < MAX_ISSUE_NUM; i++)
		gdm_mux_recv(mux_dev, mux_dev->rx_cb);

	return 0;
}

static struct usb_driver gdm_mux_driver = {
	.name = "gdm_mux",
	.probe = gdm_mux_probe,
	.disconnect = gdm_mux_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_mux_suspend,
	.resume = gdm_mux_resume,
	.reset_resume = gdm_mux_resume,
};

static int __init gdm_usb_mux_init(void)
{
	mux_rx_wq = create_workqueue("mux_rx_wq");
	if (!mux_rx_wq) {
		pr_err("failed to create workqueue mux_rx_wq\n");
		return -ENOMEM;
	}

	register_lte_tty_driver();

	return usb_register(&gdm_mux_driver);
}

static void __exit gdm_usb_mux_exit(void)
{
	unregister_lte_tty_driver();

	if (mux_rx_wq) {
		flush_workqueue(mux_rx_wq);
		destroy_workqueue(mux_rx_wq);
	}

	usb_deregister(&gdm_mux_driver);
}

module_init(gdm_usb_mux_init);
module_exit(gdm_usb_mux_exit);

MODULE_DESCRIPTION("GCT LTE TTY Device Driver");
MODULE_LICENSE("GPL");