/*********************************************************************
 *
 * Filename:      irlap.c
 * Version:       1.0
 * Description:   IrLAP implementation for Linux
 * Status:        Stable
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Mon Aug  4 20:40:53 1997
 * Modified at:   Tue Dec 14 09:26:44 1999
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
 *     Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     This program is distributed in the hope that it will be useful,
 *     but WITHOUT ANY WARRANTY; without even the implied warranty of
 *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *     GNU General Public License for more details.
 *
 *     You should have received a copy of the GNU General Public License
 *     along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 ********************************************************************/

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
#include <net/irda/irqueue.h>
#include <net/irda/irlmp.h>
#include <net/irda/irlmp_frame.h>
#include <net/irda/irlap_frame.h>
#include <net/irda/irlap.h>
#include <net/irda/timer.h>
#include <net/irda/qos.h>

static hashbin_t *irlap = NULL;
int sysctl_slot_timeout = SLOT_TIMEOUT * 1000 / HZ;

/* This is the delay of missed pf period before generating an event
 * to the application. The spec mandates 3 seconds, but in some cases
 * it's way too long. - Jean II */
int sysctl_warn_noreply_time = 3;

extern void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
static void __irlap_close(struct irlap_cb *self);
static void irlap_init_qos_capabilities(struct irlap_cb *self,
					struct qos_info *qos_user);

static const char *const lap_reasons[] __maybe_unused = {
	"ERROR, NOT USED",
	"LAP_DISC_INDICATION",
	"LAP_NO_RESPONSE",
	"LAP_RESET_INDICATION",
	"LAP_FOUND_NONE",
	"LAP_MEDIA_BUSY",
	"LAP_PRIMARY_CONFLICT",
	"ERROR, NOT USED",
};

int __init irlap_init(void)
{
	/* Check if the compiler did its job properly.
	 * May happen on some ARM configuration, check with Russell King. */
	IRDA_ASSERT(sizeof(struct xid_frame) == 14, ;);
	IRDA_ASSERT(sizeof(struct test_frame) == 10, ;);
	IRDA_ASSERT(sizeof(struct ua_frame) == 10, ;);
	IRDA_ASSERT(sizeof(struct snrm_frame) == 11, ;);

	/* Allocate master array */
	irlap = hashbin_new(HB_LOCK);
	if (irlap == NULL) {
		net_err_ratelimited("%s: can't allocate irlap hashbin!\n",
				    __func__);
		return -ENOMEM;
	}

	return 0;
}

void irlap_cleanup(void)
{
	IRDA_ASSERT(irlap != NULL, return;);

	hashbin_delete(irlap, (FREE_FUNC) __irlap_close);
}

/*
 * Function irlap_open (driver)
 *
 *    Initialize IrLAP layer
 *
 */
struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
			    const char *hw_name)
{
	struct irlap_cb *self;

	/* Initialize the irlap structure. */
	self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL);
	if (self == NULL)
		return NULL;

	self->magic = LAP_MAGIC;

	/* Make a binding between the layers */
	self->netdev = dev;
	self->qos_dev = qos;
	/* Copy hardware name */
	if (hw_name != NULL) {
		strlcpy(self->hw_name, hw_name, sizeof(self->hw_name));
	} else {
		self->hw_name[0] = '\0';
	}

	/* FIXME: should we get our own field? */
	dev->atalk_ptr = self;

	self->state = LAP_OFFLINE;

	/* Initialize transmit queue */
	skb_queue_head_init(&self->txq);
	skb_queue_head_init(&self->txq_ultra);
	skb_queue_head_init(&self->wx_list);

	/* My unique IrLAP device address! */
	/* We don't want the broadcast address, nor the NULL address
	 * (most often used to signify "invalid"), and we don't want an
	 * address already in use (otherwise connect won't be able
	 * to select the proper link). - Jean II */
	do {
		get_random_bytes(&self->saddr, sizeof(self->saddr));
	} while ((self->saddr == 0x0) || (self->saddr == BROADCAST) ||
		 (hashbin_lock_find(irlap, self->saddr, NULL)) );
	/* Copy to the driver */
	memcpy(dev->dev_addr, &self->saddr, 4);

	init_timer(&self->slot_timer);
	init_timer(&self->query_timer);
	init_timer(&self->discovery_timer);
	init_timer(&self->final_timer);
	init_timer(&self->poll_timer);
	init_timer(&self->wd_timer);
	init_timer(&self->backoff_timer);
	init_timer(&self->media_busy_timer);

	irlap_apply_default_connection_parameters(self);

	self->N3 = 3; /* # connection attempts to try before giving up */

	self->state = LAP_NDM;

	hashbin_insert(irlap, (irda_queue_t *) self, self->saddr, NULL);

	irlmp_register_link(self, self->saddr, &self->notify);

	return self;
}
EXPORT_SYMBOL(irlap_open);

/*
 * Function __irlap_close (self)
 *
 *    Remove IrLAP and all allocated memory. Stop any pending timers.
 *
 */
static void __irlap_close(struct irlap_cb *self)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Stop timers */
	del_timer(&self->slot_timer);
	del_timer(&self->query_timer);
	del_timer(&self->discovery_timer);
	del_timer(&self->final_timer);
	del_timer(&self->poll_timer);
	del_timer(&self->wd_timer);
	del_timer(&self->backoff_timer);
	del_timer(&self->media_busy_timer);

	irlap_flush_all_queues(self);

	self->magic = 0;

	kfree(self);
}

/*
 * Function irlap_close (self)
 *
 *    Remove IrLAP instance
 *
 */
void irlap_close(struct irlap_cb *self)
{
	struct irlap_cb *lap;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* We used to send a LAP_DISC_INDICATION here, but this was
	 * racy. This has been moved into irlmp_unregister_link()
	 * itself. Jean II */

	/* Kill the LAP and all LSAPs on top of it */
	irlmp_unregister_link(self->saddr);
	self->notify.instance = NULL;

	/* Be sure that we manage to remove ourselves from the hash */
	lap = hashbin_remove(irlap, self->saddr, NULL);
	if (!lap) {
		pr_debug("%s(), Didn't find myself!\n", __func__);
		return;
	}
	__irlap_close(lap);
}
EXPORT_SYMBOL(irlap_close);

/*
 * Function irlap_connect_indication (self, skb)
 *
 *    Another device is attempting to make a connection
 *
 */
void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	irlap_init_qos_capabilities(self, NULL); /* No user QoS! */

	irlmp_link_connect_indication(self->notify.instance, self->saddr,
				      self->daddr, &self->qos_tx, skb);
}

/*
 * Function irlap_connect_response (self, skb)
 *
 *    Service user has accepted incoming connection
 *
 */
void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata)
{
	irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL);
}

/*
 * Function irlap_connect_request (self, daddr, qos_user, sniff)
 *
 *    Request connection with another device, sniffing is not implemented
 *    yet.
 *
 */
void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
			   struct qos_info *qos_user, int sniff)
{
	pr_debug("%s(), daddr=0x%08x\n", __func__, daddr);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	self->daddr = daddr;

	/*
	 *  If the service user specifies QoS values for this connection,
	 *  then use them
	 */
	irlap_init_qos_capabilities(self, qos_user);

	if ((self->state == LAP_NDM) && !self->media_busy)
		irlap_do_event(self, CONNECT_REQUEST, NULL, NULL);
	else
		self->connect_pending = TRUE;
}

/*
 * Function irlap_connect_confirm (self, skb)
 *
 *    Connection request has been accepted
 *
 */
void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	irlmp_link_connect_confirm(self->notify.instance, &self->qos_tx, skb);
}

/*
 * Function irlap_data_indication (self, skb)
 *
 *    Received data frames from IR-port, so we just pass them up to
 *    IrLMP for further processing
 *
 */
void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb,
			   int unreliable)
{
	/* Hide LAP header from IrLMP layer */
	skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

	irlmp_link_data_indication(self->notify.instance, skb, unreliable);
}


/*
 * Function irlap_data_request (self, skb)
 *
 *    Queue data for transmission, must wait until XMIT state
 *
 */
void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
			int unreliable)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
		    return;);
	skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

	/*
	 *  Must set frame format now so that the rest of the code knows
	 *  whether it's dealing with an I or a UI frame
	 */
	if (unreliable)
		skb->data[1] = UI_FRAME;
	else
		skb->data[1] = I_FRAME;

	/* Don't forget to refcount it - see irlmp_connect_request(). */
	skb_get(skb);

	/* Add at the end of the queue (keep ordering) - Jean II */
	skb_queue_tail(&self->txq, skb);

	/*
	 *  Send an event for this frame only if we are in the right state
	 *  FIXME: udata should be sent first! (skb_queue_head?)
	 */
	if ((self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) {
		/* If we are not already processing the Tx queue, trigger
		 * transmission immediately - Jean II */
		if ((skb_queue_len(&self->txq) <= 1) && (!self->local_busy))
			irlap_do_event(self, DATA_REQUEST, skb, NULL);
		/* Otherwise, the packets will be sent normally at the
		 * next pf-poll - Jean II */
	}
}
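
/*
 * Note on the two header bytes pushed above (descriptive, values are
 * illustrative): data[1] is the control field set here (I_FRAME or
 * UI_FRAME), while data[0] is the address field, which is normally
 * filled in later by the frame transmission code once the connection
 * address and command/response bit are known.
 */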

/*
 * Function irlap_unitdata_request (self, skb)
 *
 *    Send Ultra data. This is data that must be sent outside any connection
 *
 */
#ifdef CONFIG_IRDA_ULTRA
void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
	       return;);
	skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

	skb->data[0] = CBROADCAST;
	skb->data[1] = UI_FRAME;

	/* Don't need to refcount, see irlmp_connless_data_request() */

	skb_queue_tail(&self->txq_ultra, skb);

	irlap_do_event(self, SEND_UI_FRAME, NULL, NULL);
}
#endif /* CONFIG_IRDA_ULTRA */

/*
 * Function irlap_unitdata_indication (self, skb)
 *
 *    Receive Ultra data. This is data that is received outside any connection
 *
 */
#ifdef CONFIG_IRDA_ULTRA
void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	/* Hide LAP header from IrLMP layer */
	skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

	irlmp_link_unitdata_indication(self->notify.instance, skb);
}
#endif /* CONFIG_IRDA_ULTRA */

/*
 * Function irlap_disconnect_request (void)
 *
 *    Request to disconnect connection by service user
 */
void irlap_disconnect_request(struct irlap_cb *self)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Don't disconnect until all data frames are successfully sent */
	if (!skb_queue_empty(&self->txq)) {
		self->disconnect_pending = TRUE;
		return;
	}

	/* Check if we are in the right state for disconnecting */
	switch (self->state) {
	case LAP_XMIT_P:        /* FALLTHROUGH */
	case LAP_XMIT_S:        /* FALLTHROUGH */
	case LAP_CONN:          /* FALLTHROUGH */
	case LAP_RESET_WAIT:    /* FALLTHROUGH */
	case LAP_RESET_CHECK:
		irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
		break;
	default:
		pr_debug("%s(), disconnect pending!\n", __func__);
		self->disconnect_pending = TRUE;
		break;
	}
}

/*
 * Function irlap_disconnect_indication (void)
 *
 *    Disconnect request from other device
 *
 */
void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
{
	pr_debug("%s(), reason=%s\n", __func__, lap_reasons[reason]);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Flush queues */
	irlap_flush_all_queues(self);

	switch (reason) {
	case LAP_RESET_INDICATION:
		pr_debug("%s(), Sending reset request!\n", __func__);
		irlap_do_event(self, RESET_REQUEST, NULL, NULL);
		break;
	case LAP_NO_RESPONSE:	   /* FALLTHROUGH */
	case LAP_DISC_INDICATION:  /* FALLTHROUGH */
	case LAP_FOUND_NONE:       /* FALLTHROUGH */
	case LAP_MEDIA_BUSY:
		irlmp_link_disconnect_indication(self->notify.instance, self,
						 reason, NULL);
		break;
	default:
		net_err_ratelimited("%s: Unknown reason %d\n",
				    __func__, reason);
	}
}

/*
 * Function irlap_discovery_request (self, discovery)
 *
 *    Start one single discovery operation.
 *
 */
void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
{
	struct irlap_info info;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
	IRDA_ASSERT(discovery != NULL, return;);

	pr_debug("%s(), nslots = %d\n", __func__, discovery->nslots);

	IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) ||
		    (discovery->nslots == 8) || (discovery->nslots == 16),
		    return;);

	/* Discovery is only possible in NDM mode */
	if (self->state != LAP_NDM) {
		pr_debug("%s(), discovery only possible in NDM mode\n",
			 __func__);
		irlap_discovery_confirm(self, NULL);
		/* Note : in theory, if we are not in NDM, we could postpone
		 * the discovery like we do for connection request.
		 * In practice, it's not worth it. If the media was busy,
		 * it's likely next time around it won't be busy. If we are
		 * in REPLY state, we will get passive discovery info & event.
		 * Jean II */
		return;
	}

	/* Check if last discovery request finished in time, or if
	 * it was aborted due to the media busy flag. */
	if (self->discovery_log != NULL) {
		hashbin_delete(self->discovery_log, (FREE_FUNC) kfree);
		self->discovery_log = NULL;
	}

	/* All operations will occur at predictable time, no need to lock */
	self->discovery_log = hashbin_new(HB_NOLOCK);

	if (self->discovery_log == NULL) {
		net_warn_ratelimited("%s(), Unable to allocate discovery log!\n",
				     __func__);
		return;
	}

	info.S = discovery->nslots; /* Number of slots */
	info.s = 0; /* Current slot */

	self->discovery_cmd = discovery;
	info.discovery = discovery;

	/* sysctl_slot_timeout bounds are checked in irsysctl.c - Jean II */
	self->slot_timeout = msecs_to_jiffies(sysctl_slot_timeout);

	irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
}

/*
 * Function irlap_discovery_confirm (log)
 *
 *    A device has been discovered in front of this station, we
 *    report directly to LMP.
 */
void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	IRDA_ASSERT(self->notify.instance != NULL, return;);

	/*
	 * Check for successful discovery, since we are then allowed to clear
	 * the media busy condition (IrLAP 6.13.4 - p.94). This should allow
	 * us to make connection attempts much faster and easier (i.e. no
	 * collisions).
	 * Setting media busy to false will also generate an event allowing
	 * to process pending events in NDM state machine.
	 * Note : the spec doesn't define what a successful discovery is.
	 * If we want Ultra to work, it's successful even if there is
	 * nobody discovered - Jean II
	 */
	if (discovery_log)
		irda_device_set_media_busy(self->netdev, FALSE);

	/* Inform IrLMP */
	irlmp_link_discovery_confirm(self->notify.instance, discovery_log);
}

/*
 * Function irlap_discovery_indication (log)
 *
 *    Somebody is trying to discover us!
 *
 */
void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
	IRDA_ASSERT(discovery != NULL, return;);

	IRDA_ASSERT(self->notify.instance != NULL, return;);

	/* A device is very likely to connect immediately after it performs
	 * a successful discovery. This means that in our case, we are much
	 * more likely to receive a connection request over the medium.
	 * So, we backoff to avoid collisions.
	 * IrLAP spec 6.13.4 suggests 100ms...
	 * Note : this little trick actually makes a *BIG* difference. If I set
	 * my Linux box with discovery enabled and one Ultra frame sent every
	 * second, my Palm has no trouble connecting to it every time!
	 * Jean II */
	irda_device_set_media_busy(self->netdev, SMALL);

	irlmp_link_discovery_indication(self->notify.instance, discovery);
}

/*
 * Function irlap_status_indication (quality_of_link)
 */
void irlap_status_indication(struct irlap_cb *self, int quality_of_link)
{
	switch (quality_of_link) {
	case STATUS_NO_ACTIVITY:
		net_info_ratelimited("IrLAP, no activity on link!\n");
		break;
	case STATUS_NOISY:
		net_info_ratelimited("IrLAP, noisy link!\n");
		break;
	default:
		break;
	}
	irlmp_status_indication(self->notify.instance,
				quality_of_link, LOCK_NO_CHANGE);
}

/*
 * Function irlap_reset_indication (void)
 */
void irlap_reset_indication(struct irlap_cb *self)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	if (self->state == LAP_RESET_WAIT)
		irlap_do_event(self, RESET_REQUEST, NULL, NULL);
	else
		irlap_do_event(self, RESET_RESPONSE, NULL, NULL);
}

/*
 * Function irlap_reset_confirm (void)
 */
void irlap_reset_confirm(void)
{
}

/*
 * Function irlap_generate_rand_time_slot (S, s)
 *
 *    Generate a random time slot between s and S-1 where
 *    S = Number of slots (0 -> S-1)
 *    s = Current slot
 */
int irlap_generate_rand_time_slot(int S, int s)
{
	static int rand;
	int slot;

	IRDA_ASSERT((S - s) > 0, return 0;);

	rand += jiffies;
	rand ^= (rand << 12);
	rand ^= (rand >> 20);

	slot = s + rand % (S-s);

	IRDA_ASSERT((slot >= s) && (slot < S), return 0;);

	return slot;
}
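
/*
 * For illustration (numbers are only an example): with S = 8 slots and
 * current slot s = 2, the expression above yields slot = 2 + rand % 6,
 * i.e. a pseudo-random value in 2..7. Answering the discovery XID in
 * that slot rather than immediately is what spreads replies from
 * several nearby devices across the discovery slots.
 */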

/*
 * Function irlap_update_nr_received (nr)
 *
 *    Remove all acknowledged frames in current window queue. This code is
 *    not intuitive and you should not try to change it. If you think it
 *    contains bugs, please mail a patch to the author instead.
 */
void irlap_update_nr_received(struct irlap_cb *self, int nr)
{
	struct sk_buff *skb = NULL;
	int count = 0;

	/*
	 * Remove all the ack-ed frames from the window queue.
	 */

	/*
	 *  Optimize for the common case. It is most likely that the receiver
	 *  will acknowledge all the frames we have sent! So in that case we
	 *  delete all frames stored in window.
	 */
	if (nr == self->vs) {
		while ((skb = skb_dequeue(&self->wx_list)) != NULL) {
			dev_kfree_skb(skb);
		}
		/* The last acked frame is the next to send minus one */
		self->va = nr - 1;
	} else {
		/* Remove all acknowledged frames in current window */
		while ((skb_peek(&self->wx_list) != NULL) &&
		       (((self->va+1) % 8) != nr))
		{
			skb = skb_dequeue(&self->wx_list);
			dev_kfree_skb(skb);

			self->va = (self->va + 1) % 8;
			count++;
		}
	}

	/* Advance window */
	self->window = self->window_size - skb_queue_len(&self->wx_list);
}
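
/*
 * Worked example (values for illustration only): assume vs = 5 and
 * va = 1, so frames 2..4 are still unacked in wx_list. If the peer
 * acks with nr = 5, nr == vs and the fast path above frees the whole
 * queue and sets va = 4. If instead nr = 4, the loop dequeues frames
 * while ((va + 1) % 8) != 4, freeing frames 2 and 3 and leaving va = 3
 * with frame 4 still queued for possible retransmission.
 */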

/*
 * Function irlap_validate_ns_received (ns)
 *
 *    Validate the next to send (ns) field from received frame.
 */
int irlap_validate_ns_received(struct irlap_cb *self, int ns)
{
	/*  ns as expected?  */
	if (ns == self->vr)
		return NS_EXPECTED;
	/*
	 *  Stations are allowed to treat invalid NS as unexpected NS
	 *  IrLAP, Recv ... with-invalid-Ns. p. 84
	 */
	return NS_UNEXPECTED;

	/* return NR_INVALID; */
}
/*
 * Function irlap_validate_nr_received (nr)
 *
 *    Validate the next to receive (nr) field from received frame.
 *
 */
int irlap_validate_nr_received(struct irlap_cb *self, int nr)
{
	/*  nr as expected?  */
	if (nr == self->vs) {
		pr_debug("%s(), expected!\n", __func__);
		return NR_EXPECTED;
	}

	/*
	 *  unexpected nr? (but within current window), first we check if the
	 *  ns numbers of the frames in the current window wrap.
	 */
	if (self->va < self->vs) {
		if ((nr >= self->va) && (nr <= self->vs))
			return NR_UNEXPECTED;
	} else {
		if ((nr >= self->va) || (nr <= self->vs))
			return NR_UNEXPECTED;
	}

	/* Invalid nr!  */
	return NR_INVALID;
}
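
/*
 * Example of the two window cases above (values for illustration):
 * without wrap, va = 2 and vs = 6 make any nr in 2..6 valid (nr == 6
 * is NR_EXPECTED, the rest NR_UNEXPECTED) and everything else
 * NR_INVALID. With wrap, va = 6 and vs = 2 make the valid set
 * 6, 7, 0, 1, 2, which is exactly what the (nr >= va) || (nr <= vs)
 * test captures.
 */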

/*
 * Function irlap_initiate_connection_state ()
 *
 *    Initialize the connection state parameters
 *
 */
void irlap_initiate_connection_state(struct irlap_cb *self)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Next to send and next to receive */
	self->vs = self->vr = 0;

	/* Last frame which got acked (0 - 1) % 8 */
	self->va = 7;

	self->window = 1;

	self->remote_busy = FALSE;
	self->retry_count = 0;
}

/*
 * Function irlap_wait_min_turn_around (self, qos)
 *
 *    Wait negotiated minimum turn around time, this function actually sets
 *    the number of XBOFs that must be sent before the next transmitted
 *    frame in order to delay for the specified amount of time. This is
 *    done to avoid using timers, and the forbidden udelay!
 */
void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos)
{
	__u32 min_turn_time;
	__u32 speed;

	/* Get QoS values.  */
	speed = qos->baud_rate.value;
	min_turn_time = qos->min_turn_time.value;

	/* No need to calculate XBOFs for speeds over 115200 bps */
	if (speed > 115200) {
		self->mtt_required = min_turn_time;
		return;
	}

	/*
	 *  Send additional BOF's for the next frame for the requested
	 *  min turn time, so now we must calculate how many chars (XBOF's) we
	 *  must send for the requested time period (min turn time)
	 */
	self->xbofs_delay = irlap_min_turn_time_in_bytes(speed, min_turn_time);
}
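
/*
 * Back-of-envelope example (assuming roughly 10 bits on the wire per
 * XBOF character): at 115200 bps a character lasts about 87 us, so a
 * negotiated min turn time of 1000 us translates into about a dozen
 * XBOFs; at 9600 bps a single character already takes about 1 ms, so
 * one XBOF is usually enough. The exact conversion is left to
 * irlap_min_turn_time_in_bytes().
 */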

/*
 * Function irlap_flush_all_queues (void)
 *
 *    Flush all queues
 *
 */
void irlap_flush_all_queues(struct irlap_cb *self)
{
	struct sk_buff *skb;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Free transmission queue */
	while ((skb = skb_dequeue(&self->txq)) != NULL)
		dev_kfree_skb(skb);

	while ((skb = skb_dequeue(&self->txq_ultra)) != NULL)
		dev_kfree_skb(skb);

	/* Free sliding window buffered packets */
	while ((skb = skb_dequeue(&self->wx_list)) != NULL)
		dev_kfree_skb(skb);
}

/*
 * Function irlap_change_speed (self, speed, now)
 *
 *    Change the speed of the IrDA port
 *
 */
static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
{
	struct sk_buff *skb;

	pr_debug("%s(), setting speed to %d\n", __func__, speed);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	self->speed = speed;

	/* Change speed now, or just piggyback speed on frames */
	if (now) {
		/* Send down empty frame to trigger speed change */
		skb = alloc_skb(0, GFP_ATOMIC);
		if (skb)
			irlap_queue_xmit(self, skb);
	}
}

/*
 * Function irlap_init_qos_capabilities (self, qos)
 *
 *    Initialize QoS for this IrLAP session. What we do is to compute the
 *    intersection of the QoS capabilities for the user, driver and for
 *    IrLAP itself. Normally, IrLAP will not specify any values, but it can
 *    be used to restrict certain values.
 */
static void irlap_init_qos_capabilities(struct irlap_cb *self,
					struct qos_info *qos_user)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
	IRDA_ASSERT(self->netdev != NULL, return;);

	/* Start out with the maximum QoS support possible */
	irda_init_max_qos_capabilies(&self->qos_rx);

	/* Apply the driver's QoS capabilities */
	irda_qos_compute_intersection(&self->qos_rx, self->qos_dev);

	/*
	 *  Check for user supplied QoS parameters. The service user is only
	 *  allowed to supply these values. We check each parameter since the
	 *  user may not have set all of them.
	 */
	if (qos_user) {
		pr_debug("%s(), Found user specified QoS!\n", __func__);

		if (qos_user->baud_rate.bits)
			self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits;

		if (qos_user->max_turn_time.bits)
			self->qos_rx.max_turn_time.bits &= qos_user->max_turn_time.bits;
		if (qos_user->data_size.bits)
			self->qos_rx.data_size.bits &= qos_user->data_size.bits;

		if (qos_user->link_disc_time.bits)
			self->qos_rx.link_disc_time.bits &= qos_user->link_disc_time.bits;
	}

	/* Use 500ms in IrLAP for now */
	self->qos_rx.max_turn_time.bits &= 0x01;

	/* Set data size */
	/*self->qos_rx.data_size.bits &= 0x03;*/

	irda_qos_bits_to_value(&self->qos_rx);
}
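
/*
 * Example of the intersection above (bit values for illustration):
 * each QoS parameter is kept as a bit field where every bit stands for
 * one allowed value. If the driver advertises baud rates up to 115200
 * but the service user passes a qos_user restricted to 9600 and 19200,
 * the AND leaves only those two bits set, and irda_qos_bits_to_value()
 * then converts the remaining bits into concrete values.
 */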

/*
 * Function irlap_apply_default_connection_parameters (self)
 *
 *    Use the default connection and transmission parameters
 */
void irlap_apply_default_connection_parameters(struct irlap_cb *self)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* xbofs : Default value in NDM */
	self->next_bofs   = 12;
	self->bofs_count  = 12;

	/* NDM Speed is 9600 */
	irlap_change_speed(self, 9600, TRUE);

	/* Set mbusy when going to NDM state */
	irda_device_set_media_busy(self->netdev, TRUE);

	/*
	 * Generate random connection address for this session, which must
	 * be 7 bits wide and different from 0x00 and 0xfe
	 */
	while ((self->caddr == 0x00) || (self->caddr == 0xfe)) {
		get_random_bytes(&self->caddr, sizeof(self->caddr));
		self->caddr &= 0xfe;
	}
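	/*
	 * Note (illustrative): the 0xfe mask clears bit 0, so the 7-bit
	 * connection address sits in the upper bits and the low bit stays
	 * free for the C/R bit that the frame code ORs in later; the loop
	 * simply retries until the result is neither 0x00 nor the
	 * broadcast value 0xfe.
	 */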

	/* Use default values until connection has been negotiated */
	self->slot_timeout = sysctl_slot_timeout;
	self->final_timeout = FINAL_TIMEOUT;
	self->poll_timeout = POLL_TIMEOUT;
	self->wd_timeout = WD_TIMEOUT;

	/* Set some default values */
	self->qos_tx.baud_rate.value = 9600;
	self->qos_rx.baud_rate.value = 9600;
	self->qos_tx.max_turn_time.value = 0;
	self->qos_rx.max_turn_time.value = 0;
	self->qos_tx.min_turn_time.value = 0;
	self->qos_rx.min_turn_time.value = 0;
	self->qos_tx.data_size.value = 64;
	self->qos_rx.data_size.value = 64;
	self->qos_tx.window_size.value = 1;
	self->qos_rx.window_size.value = 1;
	self->qos_tx.additional_bofs.value = 12;
	self->qos_rx.additional_bofs.value = 12;
	self->qos_tx.link_disc_time.value = 0;
	self->qos_rx.link_disc_time.value = 0;

	irlap_flush_all_queues(self);

	self->disconnect_pending = FALSE;
	self->connect_pending = FALSE;
}

/*
 * Function irlap_apply_connection_parameters (qos, now)
 *
 *    Initialize IrLAP with the negotiated QoS values
 *
 * If 'now' is false, the speed and xbofs will be changed after the next
 * frame is sent.
 * If 'now' is true, the speed and xbofs are changed immediately
 */
void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Set the negotiated xbofs value */
	self->next_bofs   = self->qos_tx.additional_bofs.value;
	if (now)
		self->bofs_count = self->next_bofs;

	/* Set the negotiated link speed (may need the new xbofs value) */
	irlap_change_speed(self, self->qos_tx.baud_rate.value, now);

	self->window_size = self->qos_tx.window_size.value;
	self->window      = self->qos_tx.window_size.value;

#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
	/*
	 *  Calculate how many bytes it is possible to transmit before the
	 *  link must be turned around
	 */
	self->line_capacity =
		irlap_max_line_capacity(self->qos_tx.baud_rate.value,
					self->qos_tx.max_turn_time.value);
	self->bytes_left = self->line_capacity;
#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */


	/*
	 *  Initialize timeout values, some of the rules are listed on
	 *  page 92 in IrLAP.
	 */
	IRDA_ASSERT(self->qos_tx.max_turn_time.value != 0, return;);
	IRDA_ASSERT(self->qos_rx.max_turn_time.value != 0, return;);
	/* The poll timeout applies only to the primary station.
	 * It defines the maximum time the primary stays in XMIT mode
	 * before timing out and turning the link around (sending a RR).
	 * In other words, this is how long we can keep the pf bit in primary mode.
	 * Therefore, it must be lower than or equal to our *OWN* max turn around.
	 * Jean II */
	self->poll_timeout = msecs_to_jiffies(
				self->qos_tx.max_turn_time.value);
	/* The Final timeout applies only to the primary station.
	 * It defines the maximum time the primary waits (mostly in RECV mode)
	 * for an answer from the secondary station before polling it again.
	 * Therefore, it must be greater than or equal to our *PARTNER*
	 * max turn around time - Jean II */
	self->final_timeout = msecs_to_jiffies(
				self->qos_rx.max_turn_time.value);
	/* The Watchdog Bit timeout applies only to the secondary station.
	 * It defines the maximum time the secondary waits (mostly in RECV mode)
	 * for a poll from the primary station before getting annoyed.
	 * Therefore, it must be greater than or equal to our *PARTNER*
	 * max turn around time - Jean II */
	self->wd_timeout = self->final_timeout * 2;

	/*
	 * N1 and N2 are maximum retry counts for *both* the final timer
	 * and the wd timer (with a factor 2) as defined above.
	 * After N1 retries of a timer, we give a warning to the user.
	 * After N2 retries, we consider the link dead and disconnect it.
	 * Jean II
	 */

	/*
	 *  Set N1 to 0 if Link Disconnect/Threshold Time = 3 and set it to
	 *  3 seconds otherwise. See page 71 in IrLAP for more details.
	 *  Actually, it's not always 3 seconds, as we allow to set
	 *  it via sysctl... Max maxtt is 500ms, and N1 needs to be a multiple
	 *  of 2, so 1 second is the minimum we can allow. - Jean II
	 */
	if (self->qos_tx.link_disc_time.value == sysctl_warn_noreply_time)
		/*
		 * If we set N1 to 0, it will trigger immediately, which is
		 * not what we want. What we really want is to disable it,
		 * Jean II
		 */
		self->N1 = -2; /* Disable - needs to be a multiple of 2 */
	else
		self->N1 = sysctl_warn_noreply_time * 1000 /
		  self->qos_rx.max_turn_time.value;

	pr_debug("Setting N1 = %d\n", self->N1);

	/* Set N2 to match our own disconnect time */
	self->N2 = self->qos_tx.link_disc_time.value * 1000 /
		self->qos_rx.max_turn_time.value;
	pr_debug("Setting N2 = %d\n", self->N2);
}
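
/*
 * Worked example (typical values, for illustration only): with a
 * negotiated max turn time of 500 ms on both sides, a link disconnect
 * time of 12 s and the default sysctl_warn_noreply_time of 3 s, the
 * code above yields final_timeout = 500 ms, wd_timeout = 1 s,
 * N1 = 3000/500 = 6 and N2 = 12000/500 = 24, i.e. a warning after
 * about 3 s of silence and a disconnect after about 12 s.
 */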

#ifdef CONFIG_PROC_FS
struct irlap_iter_state {
	int id;
};

static void *irlap_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct irlap_iter_state *iter = seq->private;
	struct irlap_cb *self;

	/* Protect our access to the irlap list */
	spin_lock_irq(&irlap->hb_spinlock);
	iter->id = 0;

	for (self = (struct irlap_cb *) hashbin_get_first(irlap);
	     self; self = (struct irlap_cb *) hashbin_get_next(irlap)) {
		if (iter->id == *pos)
			break;
		++iter->id;
	}

	return self;
}

static void *irlap_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct irlap_iter_state *iter = seq->private;

	++*pos;
	++iter->id;
	return (void *) hashbin_get_next(irlap);
}

static void irlap_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock_irq(&irlap->hb_spinlock);
}

static int irlap_seq_show(struct seq_file *seq, void *v)
{
	const struct irlap_iter_state *iter = seq->private;
	const struct irlap_cb *self = v;

	IRDA_ASSERT(self->magic == LAP_MAGIC, return -EINVAL;);

	seq_printf(seq, "irlap%d ", iter->id);
	seq_printf(seq, "state: %s\n",
		   irlap_state[self->state]);

	seq_printf(seq, "  device name: %s, ",
		   (self->netdev) ? self->netdev->name : "bug");
	seq_printf(seq, "hardware name: %s\n", self->hw_name);

	seq_printf(seq, "  caddr: %#02x, ", self->caddr);
	seq_printf(seq, "saddr: %#08x, ", self->saddr);
	seq_printf(seq, "daddr: %#08x\n", self->daddr);

	seq_printf(seq, "  win size: %d, ",
		   self->window_size);
	seq_printf(seq, "win: %d, ", self->window);
#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
	seq_printf(seq, "line capacity: %d, ",
		   self->line_capacity);
	seq_printf(seq, "bytes left: %d\n", self->bytes_left);
#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
	seq_printf(seq, "  tx queue len: %d ",
		   skb_queue_len(&self->txq));
	seq_printf(seq, "win queue len: %d ",
		   skb_queue_len(&self->wx_list));
	seq_printf(seq, "rbusy: %s", self->remote_busy ?
		   "TRUE" : "FALSE");
	seq_printf(seq, " mbusy: %s\n", self->media_busy ?
		   "TRUE" : "FALSE");

	seq_printf(seq, "  retrans: %d ", self->retry_count);
	seq_printf(seq, "vs: %d ", self->vs);
	seq_printf(seq, "vr: %d ", self->vr);
	seq_printf(seq, "va: %d\n", self->va);

	seq_printf(seq, "  qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");

	seq_printf(seq, "  tx\t%d\t",
		   self->qos_tx.baud_rate.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.max_turn_time.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.data_size.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.window_size.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.additional_bofs.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.min_turn_time.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.link_disc_time.value);
	seq_printf(seq, "\n");

	seq_printf(seq, "  rx\t%d\t",
		   self->qos_rx.baud_rate.value);
	seq_printf(seq, "%d\t",
		   self->qos_rx.max_turn_time.value);
	seq_printf(seq, "%d\t",
		   self->qos_rx.data_size.value);
	seq_printf(seq, "%d\t",
		   self->qos_rx.window_size.value);
	seq_printf(seq, "%d\t",
		   self->qos_rx.additional_bofs.value);
	seq_printf(seq, "%d\t",
		   self->qos_rx.min_turn_time.value);
	seq_printf(seq, "%d\n",
		   self->qos_rx.link_disc_time.value);

	return 0;
}

static const struct seq_operations irlap_seq_ops = {
	.start  = irlap_seq_start,
	.next   = irlap_seq_next,
	.stop   = irlap_seq_stop,
	.show   = irlap_seq_show,
};

static int irlap_seq_open(struct inode *inode, struct file *file)
{
	if (irlap == NULL)
		return -EINVAL;

	return seq_open_private(file, &irlap_seq_ops,
			sizeof(struct irlap_iter_state));
}

const struct file_operations irlap_seq_fops = {
	.owner		= THIS_MODULE,
	.open           = irlap_seq_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release	= seq_release_private,
};

#endif /* CONFIG_PROC_FS */
