/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/errno.h>
#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozcdev.h"
#include "ozusbsvc.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>

static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
static void oz_isoc_destructor(struct sk_buff *skb);

/*
 * Counts the uncompleted isoc frames submitted to the network device.
 */
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);

/* Application handler functions.
 */
static const struct oz_app_if g_app_if[OZ_NB_APPS] = {
	[OZ_APPID_USB] = {
		.init      = oz_usb_init,
		.term      = oz_usb_term,
		.start     = oz_usb_start,
		.stop      = oz_usb_stop,
		.rx        = oz_usb_rx,
		.heartbeat = oz_usb_heartbeat,
		.farewell  = oz_usb_farewell,
	},
	[OZ_APPID_SERIAL] = {
		.init      = oz_cdev_init,
		.term      = oz_cdev_term,
		.start     = oz_cdev_start,
		.stop      = oz_cdev_stop,
		.rx        = oz_cdev_rx,
	},
};
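
/*
 * Services are addressed by bit position: the "apps" masks used throughout
 * this file are built as (1 << app_id), so g_app_if[i] handles the app whose
 * bit is (1 << i). Handlers a service does not need are left NULL and are
 * skipped by the callers.
 */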


/*
 * Context: softirq or process
 */
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
	pd->state = state;
	switch (state) {
	case OZ_PD_S_IDLE:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_IDLE\n");
		break;
	case OZ_PD_S_CONNECTED:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_CONNECTED\n");
		break;
	case OZ_PD_S_STOPPED:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_STOPPED\n");
		break;
	case OZ_PD_S_SLEEP:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_SLEEP\n");
		break;
	}
}

/*
 * Context: softirq or process
 */
void oz_pd_get(struct oz_pd *pd)
{
	atomic_inc(&pd->ref_count);
}

/*
 * Context: softirq or process
 */
void oz_pd_put(struct oz_pd *pd)
{
	if (atomic_dec_and_test(&pd->ref_count))
		oz_pd_destroy(pd);
}

/*
 * Context: softirq-serialized
 */
struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
{
	struct oz_pd *pd;
	int i;

	pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
	if (!pd)
		return NULL;

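	/* Start with a reference count of 2: one reference is dropped by
	 * oz_pd_stop() via oz_pd_put(); the other presumably belongs to
	 * whoever publishes the PD and is dropped on its last release.
	 */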
	atomic_set(&pd->ref_count, 2);
	for (i = 0; i < OZ_NB_APPS; i++)
		spin_lock_init(&pd->app_lock[i]);
	pd->last_rx_pkt_num = 0xffffffff;
	oz_pd_set_state(pd, OZ_PD_S_IDLE);
	pd->max_tx_size = OZ_MAX_TX_SIZE;
	ether_addr_copy(pd->mac_addr, mac_addr);
	oz_elt_buf_init(&pd->elt_buff);
	spin_lock_init(&pd->tx_frame_lock);
	INIT_LIST_HEAD(&pd->tx_queue);
	INIT_LIST_HEAD(&pd->farewell_list);
	pd->last_sent_frame = &pd->tx_queue;
	spin_lock_init(&pd->stream_lock);
	INIT_LIST_HEAD(&pd->stream_list);
	tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
						(unsigned long)pd);
	tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
						(unsigned long)pd);
	hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pd->heartbeat.function = oz_pd_heartbeat_event;
	pd->timeout.function = oz_pd_timeout_event;

	return pd;
}

/*
 * Context: softirq or process
 */
static void oz_pd_free(struct work_struct *work)
{
	struct list_head *e, *n;
	struct oz_pd *pd;

	pd = container_of(work, struct oz_pd, workitem);
	oz_pd_dbg(pd, ON, "Destroying PD\n");
	/* Disable timer tasklets. */
	tasklet_kill(&pd->heartbeat_tasklet);
	tasklet_kill(&pd->timeout_tasklet);

	/* Free streams, queued tx frames and farewells. */

	list_for_each_safe(e, n, &pd->stream_list)
		oz_isoc_stream_free(list_entry(e, struct oz_isoc_stream, link));

	list_for_each_safe(e, n, &pd->tx_queue) {
		struct oz_tx_frame *f = list_entry(e, struct oz_tx_frame, link);

		if (f->skb != NULL)
			kfree_skb(f->skb);
		oz_retire_frame(pd, f);
	}

	oz_elt_buf_term(&pd->elt_buff);

	list_for_each_safe(e, n, &pd->farewell_list)
		kfree(list_entry(e, struct oz_farewell, link));

	if (pd->net_dev)
		dev_put(pd->net_dev);
	kfree(pd);
}

/*
 * Context: softirq or process
 */
void oz_pd_destroy(struct oz_pd *pd)
{
	if (hrtimer_active(&pd->timeout))
		hrtimer_cancel(&pd->timeout);
	if (hrtimer_active(&pd->heartbeat))
		hrtimer_cancel(&pd->heartbeat);

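	/* Freeing is deferred to the workqueue: oz_pd_free() calls
	 * tasklet_kill(), which may sleep, while oz_pd_destroy() can be
	 * reached from softirq context.
	 */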
	INIT_WORK(&pd->workitem, oz_pd_free);
	if (!schedule_work(&pd->workitem))
		oz_pd_dbg(pd, ON, "failed to schedule workitem\n");
}

/*
 * Context: softirq-serialized
 */
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
	int i, rc = 0;

	oz_pd_dbg(pd, ON, "%s: (0x%x) resume(%d)\n", __func__, apps, resume);
	for (i = 0; i < OZ_NB_APPS; i++) {
		if (g_app_if[i].start && (apps & (1 << i))) {
			if (g_app_if[i].start(pd, resume)) {
				rc = -1;
				oz_pd_dbg(pd, ON,
					  "Unable to start service %d\n", i);
				break;
			}
			spin_lock_bh(&g_polling_lock);
			pd->total_apps |= (1 << i);
			if (resume)
				pd->paused_apps &= ~(1 << i);
			spin_unlock_bh(&g_polling_lock);
		}
	}
	return rc;
}
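
/*
 * A minimal usage sketch (not a call site in this file): starting the USB
 * service fresh after a connect, then pausing it when the PD sleeps:
 *
 *	oz_services_start(pd, 1 << OZ_APPID_USB, 0);
 *	...
 *	oz_services_stop(pd, 1 << OZ_APPID_USB, 1);
 */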

/*
 * Context: softirq or process
 */
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
	int i;

	oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause);
	for (i = 0; i < OZ_NB_APPS; i++) {
		if (g_app_if[i].stop && (apps & (1 << i))) {
			spin_lock_bh(&g_polling_lock);
			if (pause) {
				pd->paused_apps |=  (1 << i);
			} else {
				pd->total_apps  &= ~(1 << i);
				pd->paused_apps &= ~(1 << i);
			}
			spin_unlock_bh(&g_polling_lock);
			g_app_if[i].stop(pd, pause);
		}
	}
}

/*
 * Context: softirq
 */
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
	int i, more = 0;

	for (i = 0; i < OZ_NB_APPS; i++) {
		if (g_app_if[i].heartbeat && (apps & (1 << i))) {
			if (g_app_if[i].heartbeat(pd))
				more = 1;
		}
	}
	if ((!more) && (hrtimer_active(&pd->heartbeat)))
		hrtimer_cancel(&pd->heartbeat);
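	/* In ISOC "anytime" mode, opportunistically drain up to eight
	 * pending isochronous frames per heartbeat.
	 */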
	if (pd->mode & OZ_F_ISOC_ANYTIME) {
		int count = 8;

		while (count-- && (oz_send_isoc_frame(pd) >= 0))
			;
	}
}

/*
 * Context: softirq or process
 */
void oz_pd_stop(struct oz_pd *pd)
{
	u16 stop_apps;

	oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state);
	oz_pd_indicate_farewells(pd);
	spin_lock_bh(&g_polling_lock);
	stop_apps = pd->total_apps;
	pd->total_apps = 0;
	pd->paused_apps = 0;
	spin_unlock_bh(&g_polling_lock);
	oz_services_stop(pd, stop_apps, 0);
	spin_lock_bh(&g_polling_lock);
	oz_pd_set_state(pd, OZ_PD_S_STOPPED);
	/* Remove from the PD list. */
	list_del(&pd->link);
	spin_unlock_bh(&g_polling_lock);
	oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count));
	oz_pd_put(pd);
}

/*
 * Context: softirq
 */
int oz_pd_sleep(struct oz_pd *pd)
{
	int do_stop = 0;
	u16 stop_apps;

	spin_lock_bh(&g_polling_lock);
	if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
		spin_unlock_bh(&g_polling_lock);
		return 0;
	}
	if (pd->keep_alive && pd->session_id)
		oz_pd_set_state(pd, OZ_PD_S_SLEEP);
	else
		do_stop = 1;

	stop_apps = pd->total_apps;
	spin_unlock_bh(&g_polling_lock);
	if (do_stop) {
		oz_pd_stop(pd);
	} else {
		oz_services_stop(pd, stop_apps, 1);
		oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
	}
	return do_stop;
}

/*
 * Context: softirq
 */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
	struct oz_tx_frame *f;

	f = kmem_cache_alloc(oz_tx_frame_cache, GFP_ATOMIC);
	if (f) {
		f->total_size = sizeof(struct oz_hdr);
		INIT_LIST_HEAD(&f->link);
		INIT_LIST_HEAD(&f->elt_list);
	}
	return f;
}

/*
 * Context: softirq or process
 */
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	pd->nb_queued_isoc_frames--;
	list_del_init(&f->link);

	kmem_cache_free(oz_tx_frame_cache, f);

	oz_dbg(TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
	       pd->nb_queued_isoc_frames);
}

/*
 * Context: softirq or process
 */
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	kmem_cache_free(oz_tx_frame_cache, f);
}

/*
 * Context: softirq-serialized
 */
static void oz_set_more_bit(struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

	oz_hdr->control |= OZ_F_MORE_DATA;
}

/*
 * Context: softirq-serialized
 */
static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

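	/* last_pkt_num echoes pd->trigger_pkt_num, apparently acknowledging
	 * the most recent packet received from the device that requested
	 * an ack.
	 */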
	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
}

/*
 * Context: softirq
 */
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
	struct oz_tx_frame *f;

	if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
		return -1;
	if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
		return -1;
	if (!empty && !oz_are_elts_available(&pd->elt_buff))
		return -1;
	f = oz_tx_frame_alloc(pd);
	if (f == NULL)
		return -1;
	f->skb = NULL;
	f->hdr.control =
		(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
	++pd->last_tx_pkt_num;
	put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
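	/* An "empty" frame carries a header but no elements;
	 * oz_send_queued_frames() queues one when nothing else is pending
	 * but a frame must still go out (see its out: path).
	 */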
	if (empty == 0) {
		oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
			pd->max_tx_size, &f->elt_list);
	}
	spin_lock(&pd->tx_frame_lock);
	list_add_tail(&f->link, &pd->tx_queue);
	pd->nb_queued_frames++;
	spin_unlock(&pd->tx_frame_lock);
	return 0;
}

/*
 * Context: softirq-serialized
 */
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct oz_elt_info *ei;

	/* Allocate skb with enough space for the lower layers as well
	 * as the space we need.
	 */
	skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;
	/* Reserve the head room for lower layers.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
		dev->dev_addr, skb->len) < 0)
		goto fail;
	/* Push the tail to the end of the area we are going to copy to.
	 */
	oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
	f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
	/* Copy the elements into the frame body.
	 */
	elt = (struct oz_elt *)(oz_hdr+1);
	list_for_each_entry(ei, &f->elt_list, link) {
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	return skb;
fail:
	kfree_skb(skb);
	return NULL;
}

/*
 * Context: softirq or process
 */
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct oz_elt_info *ei, *n;

	list_for_each_entry_safe(ei, n, &f->elt_list, link) {
		list_del_init(&ei->link);
		if (ei->callback)
			ei->callback(pd, ei->context);
		spin_lock_bh(&pd->elt_buff.lock);
		oz_elt_info_free(&pd->elt_buff, ei);
		spin_unlock_bh(&pd->elt_buff.lock);
	}
	oz_tx_frame_free(pd, f);
}

/*
 * Context: softirq-serialized
 */
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
{
	struct sk_buff *skb;
	struct oz_tx_frame *f;
	struct list_head *e;

	spin_lock(&pd->tx_frame_lock);
	e = pd->last_sent_frame->next;
	if (e == &pd->tx_queue) {
		spin_unlock(&pd->tx_frame_lock);
		return -1;
	}
	f = list_entry(e, struct oz_tx_frame, link);

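	/* A non-NULL f->skb marks a queued isochronous frame: it already
	 * carries a fully built skb, is transmitted at most once and is
	 * freed here rather than retired later by an acknowledgement.
	 */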
	if (f->skb != NULL) {
		skb = f->skb;
		oz_tx_isoc_free(pd, f);
		spin_unlock(&pd->tx_frame_lock);
		if (more_data)
			oz_set_more_bit(skb);
		oz_set_last_pkt_nb(pd, skb);
		if ((int)atomic_read(&g_submitted_isoc) <
							OZ_MAX_SUBMITTED_ISOC) {
			atomic_inc(&g_submitted_isoc);
			if (dev_queue_xmit(skb) < 0) {
				oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
				return -1;
			}
			oz_dbg(TX_FRAMES, "Sending ISOC Frame, nb_isoc= %d\n",
			       pd->nb_queued_isoc_frames);
			return 0;
		}
		kfree_skb(skb);
		oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
		return -1;
	}

	pd->last_sent_frame = e;
	skb = oz_build_frame(pd, f);
	spin_unlock(&pd->tx_frame_lock);
	if (!skb)
		return -1;
	if (more_data)
		oz_set_more_bit(skb);
	oz_dbg(TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
	if (dev_queue_xmit(skb) < 0)
		return -1;

	return 0;
}

/*
 * Context: softirq-serialized
 */
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
	while (oz_prepare_frame(pd, 0) >= 0)
		backlog++;

	switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {

	case OZ_F_ISOC_NO_ELTS: {
		backlog += pd->nb_queued_isoc_frames;
		if (backlog <= 0)
			goto out;
		if (backlog > OZ_MAX_SUBMITTED_ISOC)
			backlog = OZ_MAX_SUBMITTED_ISOC;
		break;
	}
	case OZ_NO_ELTS_ANYTIME: {
		if ((backlog <= 0) && (pd->isoc_sent == 0))
			goto out;
		break;
	}
	default: {
		if (backlog <= 0)
			goto out;
		break;
	}
	}
	while (backlog--) {
		if (oz_send_next_queued_frame(pd, backlog) < 0)
			break;
	}
	return;

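	/* Nothing to send: queue and transmit a single empty frame,
	 * presumably so the device still receives an up-to-date
	 * last_pkt_num acknowledgement in response to its trigger.
	 */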
out:	oz_prepare_frame(pd, 1);
	oz_send_next_queued_frame(pd, 0);
}

/*
 * Context: softirq
 */
static int oz_send_isoc_frame(struct oz_pd *pd)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct oz_elt_info *ei;
	LIST_HEAD(list);
	int total_size = sizeof(struct oz_hdr);

	oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
		pd->max_tx_size, &list);
	if (list_empty(&list))
		return 0;
	skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL) {
		oz_dbg(ON, "Cannot alloc skb\n");
		oz_elt_info_free_chain(&pd->elt_buff, &list);
		return -1;
	}
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
		dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -1;
	}
	oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
	oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	elt = (struct oz_elt *)(oz_hdr+1);

	list_for_each_entry(ei, &list, link) {
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	dev_queue_xmit(skb);
	oz_elt_info_free_chain(&pd->elt_buff, &list);
	return 0;
}

/*
 * Context: softirq-serialized
 */
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
	struct oz_tx_frame *f, *tmp = NULL;
	u8 diff;
	u32 pkt_num;

	LIST_HEAD(list);

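	/* Retire every frame the device has acknowledged. lpn is the last
	 * packet number the device reported seeing; a queued frame is
	 * retired when (lpn - pkt_num) modulo the packet-number cycle is
	 * at most half the cycle, i.e. its number is not logically ahead
	 * of lpn. A pkt_num of 0 marks a queued isochronous frame (see
	 * oz_send_isoc_unit()), which is never retired this way and stops
	 * the scan.
	 */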
	spin_lock(&pd->tx_frame_lock);
	list_for_each_entry(f, &pd->tx_queue, link) {
		pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
		diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
		if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
			break;
		oz_dbg(TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
		       pkt_num, pd->nb_queued_frames);
		tmp = f;
		pd->nb_queued_frames--;
	}
	if (tmp)
		list_cut_position(&list, &pd->tx_queue, &tmp->link);
	pd->last_sent_frame = &pd->tx_queue;
	spin_unlock(&pd->tx_frame_lock);

	list_for_each_entry_safe(f, tmp, &list, link)
		oz_retire_frame(pd, f);
}

/*
 * Precondition: stream_lock must be held.
 * Context: softirq
 */
static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st;

	list_for_each_entry(st, &pd->stream_list, link) {
		if (st->ep_num == ep_num)
			return st;
	}
	return NULL;
}

/*
 * Context: softirq
 */
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st;

	st = kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
	if (!st)
		return -ENOMEM;
	st->ep_num = ep_num;
	spin_lock_bh(&pd->stream_lock);
	if (!pd_stream_find(pd, ep_num)) {
		list_add(&st->link, &pd->stream_list);
		st = NULL;
	}
	spin_unlock_bh(&pd->stream_lock);
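	/* If the new stream was inserted, st was set to NULL and kfree()
	 * is a no-op; otherwise a stream for ep_num already existed and
	 * the duplicate allocation is freed here.
	 */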
	kfree(st);
	return 0;
}

/*
 * Context: softirq or process
 */
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
{
	kfree_skb(st->skb);
	kfree(st);
}

/*
 * Context: softirq
 */
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st;

	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st)
		list_del(&st->link);
	spin_unlock_bh(&pd->stream_lock);
	if (st)
		oz_isoc_stream_free(st);
	return 0;
}

/*
 * Context: any
 */
static void oz_isoc_destructor(struct sk_buff *skb)
{
	atomic_dec(&g_submitted_isoc);
}

/*
 * Context: softirq
 */
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
{
	struct net_device *dev = pd->net_dev;
	struct oz_isoc_stream *st;
	u8 nb_units = 0;
	struct sk_buff *skb = NULL;
	struct oz_hdr *oz_hdr = NULL;
	int size = 0;

	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st) {
		skb = st->skb;
		st->skb = NULL;
		nb_units = st->nb_units;
		st->nb_units = 0;
		oz_hdr = st->oz_hdr;
		size = st->size;
	}
	spin_unlock_bh(&pd->stream_lock);
	if (!st)
		return 0;
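	/* Accumulate one unit per call (apparently one millisecond of
	 * audio, given ms_per_isoc and the ms_data field below); the frame
	 * is only built and sent once ms_per_isoc units are collected.
	 */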
	if (!skb) {
		/* Allocate enough space for a max size frame. */
		skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
				GFP_ATOMIC);
		if (skb == NULL)
			return 0;
		/* Reserve the head room for lower layers. */
		skb_reserve(skb, LL_RESERVED_SPACE(dev));
		skb_reset_network_header(skb);
		skb->dev = dev;
		skb->protocol = htons(OZ_ETHERTYPE);
		/* For audio packets, set the priority to AC_VO. */
		skb->priority = 0x7;
		size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
		oz_hdr = (struct oz_hdr *)skb_put(skb, size);
	}
	memcpy(skb_put(skb, len), data, len);
	size += len;
	if (++nb_units < pd->ms_per_isoc) {
		spin_lock_bh(&pd->stream_lock);
		st->skb = skb;
		st->nb_units = nb_units;
		st->oz_hdr = oz_hdr;
		st->size = size;
		spin_unlock_bh(&pd->stream_lock);
	} else {
		struct oz_hdr oz;
		struct oz_isoc_large iso;

		spin_lock_bh(&pd->stream_lock);
		iso.frame_number = st->frame_num;
		st->frame_num += nb_units;
		spin_unlock_bh(&pd->stream_lock);
		oz.control =
			(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
		oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
		oz.pkt_num = 0;
		iso.endpoint = ep_num;
		iso.format = OZ_DATA_F_ISOC_LARGE;
		iso.ms_data = nb_units;
		memcpy(oz_hdr, &oz, sizeof(oz));
		memcpy(oz_hdr+1, &iso, sizeof(iso));
		if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
				dev->dev_addr, skb->len) < 0)
			goto out;

		skb->destructor = oz_isoc_destructor;
		/* Queue for transmit if the mode is not ANYTIME. */
		if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
			struct oz_tx_frame *isoc_unit = NULL;
			int nb = pd->nb_queued_isoc_frames;

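			/* The queue already holds isoc_latency frames or
			 * more: drop the oldest queued isoc frame to bound
			 * the latency.
			 */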
			if (nb >= pd->isoc_latency) {
				struct oz_tx_frame *f;

				oz_dbg(TX_FRAMES, "Dropping ISOC Unit nb= %d\n",
				       nb);
				spin_lock(&pd->tx_frame_lock);
				list_for_each_entry(f, &pd->tx_queue, link) {
					if (f->skb != NULL) {
						oz_tx_isoc_free(pd, f);
						break;
					}
				}
				spin_unlock(&pd->tx_frame_lock);
			}
			isoc_unit = oz_tx_frame_alloc(pd);
			if (isoc_unit == NULL)
				goto out;
			isoc_unit->hdr = oz;
			isoc_unit->skb = skb;
			spin_lock_bh(&pd->tx_frame_lock);
			list_add_tail(&isoc_unit->link, &pd->tx_queue);
			pd->nb_queued_isoc_frames++;
			spin_unlock_bh(&pd->tx_frame_lock);
			oz_dbg(TX_FRAMES,
			       "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
			       pd->nb_queued_isoc_frames, pd->nb_queued_frames);
			return 0;
		}

		/* In ANYTIME mode, transmit the unit immediately. */
		if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
			atomic_inc(&g_submitted_isoc);
			if (dev_queue_xmit(skb) < 0)
				return -1;
			return 0;
		}

out:	kfree_skb(skb);
	return -1;

	}
	return 0;
}

/*
 * Context: process
 */
void oz_apps_init(void)
{
	int i;

	for (i = 0; i < OZ_NB_APPS; i++) {
		if (g_app_if[i].init)
			g_app_if[i].init();
	}
}

/*
 * Context: process
 */
void oz_apps_term(void)
{
	int i;

	/* Terminate all the apps. */
	for (i = 0; i < OZ_NB_APPS; i++) {
		if (g_app_if[i].term)
			g_app_if[i].term();
	}
}

/*
 * Context: softirq-serialized
 */
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
	if (app_id < OZ_NB_APPS && g_app_if[app_id].rx)
		g_app_if[app_id].rx(pd, elt);
}

/*
 * Context: softirq or process
 */
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
	struct oz_farewell *f;
	const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB];

	while (1) {
		spin_lock_bh(&g_polling_lock);
		if (list_empty(&pd->farewell_list)) {
			spin_unlock_bh(&g_polling_lock);
			break;
		}
		f = list_first_entry(&pd->farewell_list,
				struct oz_farewell, link);
		list_del(&f->link);
		spin_unlock_bh(&g_polling_lock);
		if (ai->farewell)
			ai->farewell(pd, f->ep_num, f->report, f->len);
		kfree(f);
	}
}