/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/errno.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozusbsvc.h"

#include "ozappif.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>

#define OZ_CF_CONN_SUCCESS	1
#define OZ_CF_CONN_FAILURE	2

#define OZ_DO_STOP		1
#define OZ_DO_SLEEP		2

struct oz_binding {
	struct packet_type ptype;
	char name[OZ_MAX_BINDING_LEN];
	struct list_head link;
};

/*
 * External variable
 */
DEFINE_SPINLOCK(g_polling_lock);
/*
 * Static variables.
 */
static LIST_HEAD(g_pd_list);
static LIST_HEAD(g_binding);
static DEFINE_SPINLOCK(g_binding_lock);
static struct sk_buff_head g_rx_queue;
static u8 g_session_id;
static u16 g_apps = 0x1;
static int g_processing_rx;

struct kmem_cache *oz_elt_info_cache;
struct kmem_cache *oz_tx_frame_cache;

/*
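 * Return the next session id, avoiding zero and the value given in @exclude.
 *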
 * Context: softirq-serialized
 */
static u8 oz_get_new_session_id(u8 exclude)
{
	if (++g_session_id == 0)
		g_session_id = 1;
	if (g_session_id == exclude) {
		if (++g_session_id == 0)
			g_session_id = 1;
	}
	return g_session_id;
}

/*
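 * Build and transmit an OZ_ELT_CONNECT_RSP frame carrying @status (and, on
 * success, the negotiated mode, session id and application mask) back to the
 * peripheral device.
 *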
 * Context: softirq-serialized
 */
static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct oz_elt_connect_rsp *body;

	int sz = sizeof(struct oz_hdr) + sizeof(struct oz_elt) +
			sizeof(struct oz_elt_connect_rsp);
	skb = alloc_skb(sz + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	oz_hdr = (struct oz_hdr *)skb_put(skb, sz);
	elt = (struct oz_elt *)(oz_hdr+1);
	body = (struct oz_elt_connect_rsp *)(elt+1);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	/* Fill in device header */
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
			dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return;
	}
	oz_hdr->control = OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT;
	oz_hdr->last_pkt_num = 0;
	put_unaligned(0, &oz_hdr->pkt_num);
	elt->type = OZ_ELT_CONNECT_RSP;
	elt->length = sizeof(struct oz_elt_connect_rsp);
	memset(body, 0, sizeof(struct oz_elt_connect_rsp));
	body->status = status;
	if (status == 0) {
		body->mode = pd->mode;
		body->session_id = pd->session_id;
		put_unaligned(cpu_to_le16(pd->total_apps), &body->apps);
	}
	oz_dbg(ON, "TX: OZ_ELT_CONNECT_RSP %d\n", status);
	dev_queue_xmit(skb);
}

/*
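 * Decode the keep-alive field of a connect request (value plus unit bits) and
 * store the result in pd->keep_alive in milliseconds.
 *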
 * Context: softirq-serialized
 */
static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
{
	unsigned long keep_alive = kalive & OZ_KALIVE_VALUE_MASK;

	switch (kalive & OZ_KALIVE_TYPE_MASK) {
	case OZ_KALIVE_SPECIAL:
		pd->keep_alive = keep_alive * 1000*60*60*24*20;
		break;
	case OZ_KALIVE_SECS:
		pd->keep_alive = keep_alive*1000;
		break;
	case OZ_KALIVE_MINS:
		pd->keep_alive = keep_alive*1000*60;
		break;
	case OZ_KALIVE_HOURS:
		pd->keep_alive = keep_alive*1000*60*60;
		break;
	default:
		pd->keep_alive = 0;
	}
	oz_dbg(ON, "Keepalive = %lu mSec\n", pd->keep_alive);
}

/*
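 * Set the presleep timeout in milliseconds (OZ_PRESLEEP_TOUT if the device
 * reports zero) and, if requested, restart the PD's timeout timer with the
 * new value.
 *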
 * Context: softirq-serialized
 */
static void pd_set_presleep(struct oz_pd *pd, u8 presleep, u8 start_timer)
{
	if (presleep)
		pd->presleep = presleep*100;
	else
		pd->presleep = OZ_PRESLEEP_TOUT;
	if (start_timer) {
		spin_unlock(&g_polling_lock);
		oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
		spin_lock(&g_polling_lock);
	}
	oz_dbg(ON, "Presleep time = %lu mSec\n", pd->presleep);
}

/*
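 * Handle an OZ_ELT_CONNECT_REQ element: locate an existing PD for the source
 * address (or allocate a new one), negotiate the session parameters and
 * application set, start/stop/resume the affected services and send a
 * connect response.  Returns the PD, or NULL if the connection was refused.
 *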
 * Context: softirq-serialized
 */
static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
			const u8 *pd_addr, struct net_device *net_dev)
{
	struct oz_pd *pd;
	struct oz_elt_connect_req *body =
			(struct oz_elt_connect_req *)(elt+1);
	u8 rsp_status = OZ_STATUS_SUCCESS;
	u8 stop_needed = 0;
	u16 new_apps = g_apps;
	struct net_device *old_net_dev = NULL;
	struct oz_pd *free_pd = NULL;

	if (cur_pd) {
		pd = cur_pd;
		spin_lock_bh(&g_polling_lock);
	} else {
		struct oz_pd *pd2 = NULL;
		struct list_head *e;

		pd = oz_pd_alloc(pd_addr);
		if (pd == NULL)
			return NULL;
		getnstimeofday(&pd->last_rx_timestamp);
		spin_lock_bh(&g_polling_lock);
		list_for_each(e, &g_pd_list) {
			pd2 = list_entry(e, struct oz_pd, link);
			if (ether_addr_equal(pd2->mac_addr, pd_addr)) {
				free_pd = pd;
				pd = pd2;
				break;
			}
		}
		if (pd != pd2)
			list_add_tail(&pd->link, &g_pd_list);
	}
	if (pd == NULL) {
		spin_unlock_bh(&g_polling_lock);
		return NULL;
	}
	if (pd->net_dev != net_dev) {
		old_net_dev = pd->net_dev;
		dev_hold(net_dev);
		pd->net_dev = net_dev;
	}
	oz_dbg(ON, "Host vendor: %d\n", body->host_vendor);
	pd->max_tx_size = OZ_MAX_TX_SIZE;
	pd->mode = body->mode;
	pd->pd_info = body->pd_info;
	if (pd->mode & OZ_F_ISOC_NO_ELTS) {
		pd->ms_per_isoc = body->ms_per_isoc;
		if (!pd->ms_per_isoc)
			pd->ms_per_isoc = 4;

		switch (body->ms_isoc_latency & OZ_LATENCY_MASK) {
		case OZ_ONE_MS_LATENCY:
			pd->isoc_latency = (body->ms_isoc_latency &
					~OZ_LATENCY_MASK) / pd->ms_per_isoc;
			break;
		case OZ_TEN_MS_LATENCY:
			pd->isoc_latency = ((body->ms_isoc_latency &
				~OZ_LATENCY_MASK) * 10) / pd->ms_per_isoc;
			break;
		default:
			pd->isoc_latency = OZ_MAX_TX_QUEUE_ISOC;
		}
	}
	if (body->max_len_div16)
		pd->max_tx_size = ((u16)body->max_len_div16)<<4;
	oz_dbg(ON, "Max frame:%u Ms per isoc:%u\n",
	       pd->max_tx_size, pd->ms_per_isoc);
	pd->max_stream_buffering = 3*1024;
	pd->pulse_period = OZ_QUANTUM;
	pd_set_presleep(pd, body->presleep, 0);
	pd_set_keepalive(pd, body->keep_alive);

	new_apps &= le16_to_cpu(get_unaligned(&body->apps));
	if ((new_apps & 0x1) && (body->session_id)) {
		if (pd->session_id) {
			if (pd->session_id != body->session_id) {
				rsp_status = OZ_STATUS_SESSION_MISMATCH;
				goto done;
			}
		} else {
			new_apps &= ~0x1;  /* Resume not permitted */
			pd->session_id =
				oz_get_new_session_id(body->session_id);
		}
	} else {
		if (pd->session_id && !body->session_id) {
			rsp_status = OZ_STATUS_SESSION_TEARDOWN;
			stop_needed = 1;
		} else {
			new_apps &= ~0x1;  /* Resume not permitted */
			pd->session_id =
				oz_get_new_session_id(body->session_id);
		}
	}
done:
	if (rsp_status == OZ_STATUS_SUCCESS) {
		u16 start_apps = new_apps & ~pd->total_apps & ~0x1;
		u16 stop_apps = pd->total_apps & ~new_apps & ~0x1;
		u16 resume_apps = new_apps & pd->paused_apps & ~0x1;

		spin_unlock_bh(&g_polling_lock);
		oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
		oz_dbg(ON, "new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
		       new_apps, pd->total_apps, pd->paused_apps);
		if (start_apps) {
			if (oz_services_start(pd, start_apps, 0))
				rsp_status = OZ_STATUS_TOO_MANY_PDS;
		}
		if (resume_apps)
			if (oz_services_start(pd, resume_apps, 1))
				rsp_status = OZ_STATUS_TOO_MANY_PDS;
		if (stop_apps)
			oz_services_stop(pd, stop_apps, 0);
		oz_pd_request_heartbeat(pd);
	} else {
		spin_unlock_bh(&g_polling_lock);
	}
	oz_send_conn_rsp(pd, rsp_status);
	if (rsp_status != OZ_STATUS_SUCCESS) {
		if (stop_needed)
			oz_pd_stop(pd);
		oz_pd_put(pd);
		pd = NULL;
	}
	if (old_net_dev)
		dev_put(old_net_dev);
	if (free_pd)
		oz_pd_destroy(free_pd);
	return pd;
}

/*
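 * Store a farewell report for endpoint @ep_num/@index on the PD, replacing
 * any earlier report for the same endpoint and index.
 *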
 * Context: softirq-serialized
 */
static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
			const u8 *report, u8 len)
{
	struct oz_farewell *f;
	struct oz_farewell *f2;
	int found = 0;

	f = kmalloc(sizeof(struct oz_farewell) + len, GFP_ATOMIC);
	if (!f)
		return;
	f->ep_num = ep_num;
	f->index = index;
	f->len = len;
	memcpy(f->report, report, len);
	oz_dbg(ON, "RX: Adding farewell report\n");
	spin_lock(&g_polling_lock);
	list_for_each_entry(f2, &pd->farewell_list, link) {
		if ((f2->ep_num == ep_num) && (f2->index == index)) {
			found = 1;
			list_del(&f2->link);
			break;
		}
	}
	list_add_tail(&f->link, &pd->farewell_list);
	spin_unlock(&g_polling_lock);
	if (found)
		kfree(f2);
}

/*
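 * Receive path for a single Ozmo frame: check the protocol version, refresh
 * the PD's timeout and duplicate-detection state, handle ACK/trigger
 * processing for triggered mode, then walk and dispatch each element in the
 * frame body.
 *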
 * Context: softirq-serialized
 */
static void oz_rx_frame(struct sk_buff *skb)
{
	u8 *mac_hdr;
	u8 *src_addr;
	struct oz_elt *elt;
	int length;
	struct oz_pd *pd = NULL;
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
	struct timespec current_time;
	int dup = 0;
	u32 pkt_num;

	oz_dbg(RX_FRAMES, "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
	       oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
	mac_hdr = skb_mac_header(skb);
	src_addr = &mac_hdr[ETH_ALEN];
	length = skb->len;

	/* Check the version field */
	if (oz_get_prot_ver(oz_hdr->control) != OZ_PROTOCOL_VERSION) {
		oz_dbg(ON, "Incorrect protocol version: %d\n",
		       oz_get_prot_ver(oz_hdr->control));
		goto done;
	}

	pkt_num = le32_to_cpu(get_unaligned(&oz_hdr->pkt_num));

	pd = oz_pd_find(src_addr);
	if (pd) {
		if (!(pd->state & OZ_PD_S_CONNECTED))
			oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
		getnstimeofday(&current_time);
		if ((current_time.tv_sec != pd->last_rx_timestamp.tv_sec) ||
			(pd->presleep < MSEC_PER_SEC)) {
			oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
			pd->last_rx_timestamp = current_time;
		}
		if (pkt_num != pd->last_rx_pkt_num) {
			pd->last_rx_pkt_num = pkt_num;
		} else {
			dup = 1;
			oz_dbg(ON, "Duplicate frame\n");
		}
	}

	if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) {
		oz_dbg(RX_FRAMES, "Received TRIGGER Frame\n");
		pd->last_sent_frame = &pd->tx_queue;
		if (oz_hdr->control & OZ_F_ACK) {
			/* Retire completed frames */
			oz_retire_tx_frames(pd, oz_hdr->last_pkt_num);
		}
		if ((oz_hdr->control & OZ_F_ACK_REQUESTED) &&
				(pd->state == OZ_PD_S_CONNECTED)) {
			int backlog = pd->nb_queued_frames;

			pd->trigger_pkt_num = pkt_num;
			/* Send queued frames */
			oz_send_queued_frames(pd, backlog);
		}
	}

	length -= sizeof(struct oz_hdr);
	elt = (struct oz_elt *)((u8 *)oz_hdr + sizeof(struct oz_hdr));

	while (length >= sizeof(struct oz_elt)) {
		length -= sizeof(struct oz_elt) + elt->length;
		if (length < 0)
			break;
		switch (elt->type) {
		case OZ_ELT_CONNECT_REQ:
			oz_dbg(ON, "RX: OZ_ELT_CONNECT_REQ\n");
			pd = oz_connect_req(pd, elt, src_addr, skb->dev);
			break;
		case OZ_ELT_DISCONNECT:
			oz_dbg(ON, "RX: OZ_ELT_DISCONNECT\n");
			if (pd)
				oz_pd_sleep(pd);
			break;
		case OZ_ELT_UPDATE_PARAM_REQ: {
				struct oz_elt_update_param *body =
					(struct oz_elt_update_param *)(elt + 1);
				oz_dbg(ON, "RX: OZ_ELT_UPDATE_PARAM_REQ\n");
				if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
					spin_lock(&g_polling_lock);
					pd_set_keepalive(pd, body->keepalive);
					pd_set_presleep(pd, body->presleep, 1);
					spin_unlock(&g_polling_lock);
				}
			}
			break;
		case OZ_ELT_FAREWELL_REQ: {
				struct oz_elt_farewell *body =
					(struct oz_elt_farewell *)(elt + 1);
				oz_dbg(ON, "RX: OZ_ELT_FAREWELL_REQ\n");
				/* Ignore farewells from unknown PDs */
				if (pd)
					oz_add_farewell(pd, body->ep_num,
						body->index, body->report,
						elt->length + 1 - sizeof(*body));
			}
			break;
		case OZ_ELT_APP_DATA:
			if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
				struct oz_app_hdr *app_hdr =
					(struct oz_app_hdr *)(elt+1);
				if (dup)
					break;
				oz_handle_app_elt(pd, app_hdr->app_id, elt);
			}
			break;
		default:
			oz_dbg(ON, "RX: Unknown elt %02x\n", elt->type);
		}
		elt = oz_next_elt(elt);
	}
done:
	if (pd)
		oz_pd_put(pd);
	consume_skb(skb);
}

/*
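 * Shut the protocol down: remove all packet-type bindings, stop and release
 * every known PD, then destroy the element and TX-frame caches.
 *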
 * Context: process
 */
void oz_protocol_term(void)
{
	struct oz_binding *b, *t;

	/* Walk the list of bindings and remove each one.
	 */
	spin_lock_bh(&g_binding_lock);
	list_for_each_entry_safe(b, t, &g_binding, link) {
		list_del(&b->link);
		spin_unlock_bh(&g_binding_lock);
		dev_remove_pack(&b->ptype);
		if (b->ptype.dev)
			dev_put(b->ptype.dev);
		kfree(b);
		spin_lock_bh(&g_binding_lock);
	}
	spin_unlock_bh(&g_binding_lock);
	/* Walk the list of PDs and stop each one. This causes the PD to be
	 * removed from the list so we can just pull each one from the head
	 * of the list.
	 */
	spin_lock_bh(&g_polling_lock);
	while (!list_empty(&g_pd_list)) {
		struct oz_pd *pd =
			list_first_entry(&g_pd_list, struct oz_pd, link);
		oz_pd_get(pd);
		spin_unlock_bh(&g_polling_lock);
		oz_pd_stop(pd);
		oz_pd_put(pd);
		spin_lock_bh(&g_polling_lock);
	}
	spin_unlock_bh(&g_polling_lock);
	oz_dbg(ON, "Protocol stopped\n");

	kmem_cache_destroy(oz_tx_frame_cache);
	kmem_cache_destroy(oz_elt_info_cache);
}

/*
 * Context: softirq
 */
void oz_pd_heartbeat_handler(unsigned long data)
{
	struct oz_pd *pd = (struct oz_pd *)data;
	u16 apps = 0;

	spin_lock_bh(&g_polling_lock);
	if (pd->state & OZ_PD_S_CONNECTED)
		apps = pd->total_apps;
	spin_unlock_bh(&g_polling_lock);
	if (apps)
		oz_pd_heartbeat(pd, apps);
	oz_pd_put(pd);
}

/*
 * Context: softirq
 */
void oz_pd_timeout_handler(unsigned long data)
{
	int type;
	struct oz_pd *pd = (struct oz_pd *)data;

	spin_lock_bh(&g_polling_lock);
	type = pd->timeout_type;
	spin_unlock_bh(&g_polling_lock);
	switch (type) {
	case OZ_TIMER_TOUT:
		oz_pd_sleep(pd);
		break;
	case OZ_TIMER_STOP:
		oz_pd_stop(pd);
		break;
	}
	oz_pd_put(pd);
}

/*
 * Context: Interrupt
 */
enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer)
{
	struct oz_pd *pd;

	pd = container_of(timer, struct oz_pd, heartbeat);
	hrtimer_forward_now(timer, ktime_set(pd->pulse_period /
			MSEC_PER_SEC, (pd->pulse_period % MSEC_PER_SEC) *
			NSEC_PER_MSEC));
	oz_pd_get(pd);
	tasklet_schedule(&pd->heartbeat_tasklet);
	return HRTIMER_RESTART;
}

/*
 * Context: Interrupt
 */
enum hrtimer_restart oz_pd_timeout_event(struct hrtimer *timer)
{
	struct oz_pd *pd;

	pd = container_of(timer, struct oz_pd, timeout);
	oz_pd_get(pd);
	tasklet_schedule(&pd->timeout_tasklet);
	return HRTIMER_NORESTART;
}

/*
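 * (Re)arm the PD's timeout or heartbeat hrtimer with a relative expiry of
 * @due_time milliseconds.  For OZ_TIMER_TOUT/OZ_TIMER_STOP the type is
 * recorded in pd->timeout_type.
 *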
 * Context: softirq or process
 */
void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time)
{
	spin_lock_bh(&g_polling_lock);
	switch (type) {
	case OZ_TIMER_TOUT:
	case OZ_TIMER_STOP:
		if (hrtimer_active(&pd->timeout)) {
			hrtimer_set_expires(&pd->timeout, ktime_set(due_time /
				MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
				NSEC_PER_MSEC));
			hrtimer_start_expires(&pd->timeout, HRTIMER_MODE_REL);
		} else {
			hrtimer_start(&pd->timeout, ktime_set(due_time /
				MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
				NSEC_PER_MSEC), HRTIMER_MODE_REL);
		}
		pd->timeout_type = type;
		break;
	case OZ_TIMER_HEARTBEAT:
		if (!hrtimer_active(&pd->heartbeat))
			hrtimer_start(&pd->heartbeat, ktime_set(due_time /
				MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
				NSEC_PER_MSEC), HRTIMER_MODE_REL);
		break;
	}
	spin_unlock_bh(&g_polling_lock);
}

/*
 * Context: softirq or process
 */
void oz_pd_request_heartbeat(struct oz_pd *pd)
{
	oz_timer_add(pd, OZ_TIMER_HEARTBEAT, pd->pulse_period > 0 ?
					pd->pulse_period : OZ_QUANTUM);
}

/*
 * Context: softirq or process
 */
struct oz_pd *oz_pd_find(const u8 *mac_addr)
{
	struct oz_pd *pd;

	spin_lock_bh(&g_polling_lock);
	list_for_each_entry(pd, &g_pd_list, link) {
		if (ether_addr_equal(pd->mac_addr, mac_addr)) {
			oz_pd_get(pd);
			spin_unlock_bh(&g_polling_lock);
			return pd;
		}
	}
	spin_unlock_bh(&g_polling_lock);
	return NULL;
}

/*
 * Context: process
 */
void oz_app_enable(int app_id, int enable)
{
	if (app_id < OZ_NB_APPS) {
		spin_lock_bh(&g_polling_lock);
		if (enable)
			g_apps |= (1<<app_id);
		else
			g_apps &= ~(1<<app_id);
		spin_unlock_bh(&g_polling_lock);
	}
}

/*
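 * packet_type receive handler.  Frames are normally processed directly; if a
 * frame arrives while another is already being handled, it is queued on
 * g_rx_queue and drained by the context that set g_processing_rx.
 *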
 * Context: softirq
 */
static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
		struct packet_type *pt, struct net_device *orig_dev)
{
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		return 0;
	spin_lock_bh(&g_rx_queue.lock);
	if (g_processing_rx) {
		/* We already hold the lock so use __ variant.
		 */
		__skb_queue_head(&g_rx_queue, skb);
		spin_unlock_bh(&g_rx_queue.lock);
	} else {
		g_processing_rx = 1;
		do {
			spin_unlock_bh(&g_rx_queue.lock);
			oz_rx_frame(skb);
			spin_lock_bh(&g_rx_queue.lock);
			if (skb_queue_empty(&g_rx_queue)) {
				g_processing_rx = 0;
				spin_unlock_bh(&g_rx_queue.lock);
				break;
			}
			/* We already hold the lock so use __ variant.
			 */
			skb = __skb_dequeue(&g_rx_queue);
		} while (1);
	}
	return 0;
}

/*
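 * Register an OZ_ETHERTYPE packet handler, optionally bound to the named
 * network device (a NULL or empty name binds to all devices).
 *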
 * Context: process
 */
void oz_binding_add(const char *net_dev)
{
	struct oz_binding *binding;

	binding = kzalloc(sizeof(struct oz_binding), GFP_KERNEL);
	if (!binding)
		return;

	binding->ptype.type = htons(OZ_ETHERTYPE);
	binding->ptype.func = oz_pkt_recv;
	if (net_dev && *net_dev) {
		strncpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
		oz_dbg(ON, "Adding binding: %s\n", net_dev);
		binding->ptype.dev = dev_get_by_name(&init_net, net_dev);
		if (binding->ptype.dev == NULL) {
			oz_dbg(ON, "Netdev %s not found\n", net_dev);
			kfree(binding);
			return;
		}
	}
	dev_add_pack(&binding->ptype);
	spin_lock_bh(&g_binding_lock);
	list_add_tail(&binding->link, &g_binding);
	spin_unlock_bh(&g_binding_lock);
}

/*
 * Context: process
 */
static void pd_stop_all_for_device(struct net_device *net_dev)
{
	LIST_HEAD(h);
	struct oz_pd *pd;
	struct oz_pd *n;

	spin_lock_bh(&g_polling_lock);
	list_for_each_entry_safe(pd, n, &g_pd_list, link) {
		if (pd->net_dev == net_dev) {
			list_move(&pd->link, &h);
			oz_pd_get(pd);
		}
	}
	spin_unlock_bh(&g_polling_lock);
	while (!list_empty(&h)) {
		pd = list_first_entry(&h, struct oz_pd, link);
		oz_pd_stop(pd);
		oz_pd_put(pd);
	}
}

/*
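 * Find the binding for the named device, unhook its packet handler, release
 * the device reference, stop any PDs that were using it and free the binding.
 *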
 * Context: process
 */
void oz_binding_remove(const char *net_dev)
{
	struct oz_binding *binding;
	int found = 0;

	oz_dbg(ON, "Removing binding: %s\n", net_dev);
	spin_lock_bh(&g_binding_lock);
	list_for_each_entry(binding, &g_binding, link) {
		if (strncmp(binding->name, net_dev, OZ_MAX_BINDING_LEN) == 0) {
			oz_dbg(ON, "Binding '%s' found\n", net_dev);
			found = 1;
			/* Unlink while we still hold the binding lock */
			list_del(&binding->link);
			break;
		}
	}
	spin_unlock_bh(&g_binding_lock);
	if (found) {
		dev_remove_pack(&binding->ptype);
		if (binding->ptype.dev) {
			dev_put(binding->ptype.dev);
			pd_stop_all_for_device(binding->ptype.dev);
		}
		kfree(binding);
	}
}

/*
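 * Copy the next comma-separated device name from @s into @dname (at most
 * @max_size - 1 characters plus a terminating NUL) and return a pointer just
 * past the copied name.  For example, given "eth0,wlan0" the first call
 * copies "eth0" and returns a pointer to ",wlan0".
 *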
 * Context: process
 */
static char *oz_get_next_device_name(char *s, char *dname, int max_size)
{
	while (*s == ',')
		s++;
	while (*s && (*s != ',') && max_size > 1) {
		*dname++ = *s++;
		max_size--;
	}
	*dname = 0;
	return s;
}

/*
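 * Initialise the protocol layer: create the element and TX-frame caches,
 * initialise the receive queue and create a binding for each device named in
 * @devs (a comma-separated list, e.g. "eth0,wlan0"; "*" binds to all
 * devices).  Returns 0 on success or -ENOMEM.
 *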
 * Context: process
 */
int oz_protocol_init(char *devs)
{
	oz_elt_info_cache = KMEM_CACHE(oz_elt_info, 0);
	if (!oz_elt_info_cache)
		return -ENOMEM;

	oz_tx_frame_cache = KMEM_CACHE(oz_tx_frame, 0);
	if (!oz_tx_frame_cache) {
		kmem_cache_destroy(oz_elt_info_cache);
		return -ENOMEM;
	}

	skb_queue_head_init(&g_rx_queue);
	if (devs[0] == '*') {
		oz_binding_add(NULL);
	} else {
		char d[32];

		while (*devs) {
			devs = oz_get_next_device_name(devs, d, sizeof(d));
			if (d[0])
				oz_binding_add(d);
		}
	}
	return 0;
}

/*
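 * Copy the MAC addresses of up to @max_count known PDs into @addr and return
 * the number of entries written.
 *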
 * Context: process
 */
int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
{
	struct oz_pd *pd;
	int count = 0;

	spin_lock_bh(&g_polling_lock);
	list_for_each_entry(pd, &g_pd_list, link) {
		if (count >= max_count)
			break;
		ether_addr_copy((u8 *)&addr[count++], pd->mac_addr);
	}
	spin_unlock_bh(&g_polling_lock);
	return count;
}